/*
 * Copyright (c) 1987 Carnegie-Mellon University.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to Carnegie
 * any improvements or extensions that they make and grant Carnegie
 * the rights to redistribute these changes.  We can be reached at
 * mach@cs.cmu.edu or
 *      Project Mach
 *      School of Computer Science
 *      Carnegie Mellon University
 *      Pittsburgh PA 15213-3890
 *
 * %sccs.include.redist.c%
 *
 *      @(#)vm_fault.c  7.2 (Berkeley) 03/19/91
 */

/*
 *      Page fault handling module.
 */

#include "param.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_object.h"
#include "../vm/vm_page.h"
#include "../vm/pmap.h"
#include "../vm/vm_statistics.h"
#include "../vm/vm_pageout.h"

/*
 *      vm_fault:
 *
 *      Handle a page fault occurring at the given address,
 *      requiring the given permissions, in the map specified.
 *      If successful, the page is inserted into the
 *      associated physical map.
 *
 *      NOTE: the given address should be truncated to the
 *      proper page address.
 *
 *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *      a standard error specifying why the fault is fatal is returned.
 *
 *      The map in question must be referenced, and remains so.
 *      Caller may hold no locks.
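 *
 *      An illustrative use from a machine-dependent trap handler (the
 *      actual glue is not part of this file, and the names here are
 *      only for the example) might look like:
 *
 *              va = trunc_page(fault_address);
 *              rv = vm_fault(map, va, fault_type, FALSE);
 *              if (rv != KERN_SUCCESS)
 *                      ... signal the faulting process ...
 *
 *      where fault_type is some combination of VM_PROT_READ and
 *      VM_PROT_WRITE derived from the trap.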
 */
vm_fault(map, vaddr, fault_type, change_wiring)
    vm_map_t    map;
    vm_offset_t vaddr;
    vm_prot_t   fault_type;
    boolean_t   change_wiring;
{
    vm_object_t     first_object;
    vm_offset_t     first_offset;
    vm_map_entry_t  entry;
    register vm_object_t    object;
    register vm_offset_t    offset;
    register vm_page_t      m;
    vm_page_t       first_m;
    vm_prot_t       prot;
    int             result;
    boolean_t       wired;
    boolean_t       su;
    boolean_t       lookup_still_valid;
    boolean_t       page_exists;
    vm_page_t       old_m;
    vm_object_t     next_object;

    vm_stat.faults++;           /* needs lock XXX */

    /*
     * Recovery actions
     */
#define FREE_PAGE(m) { \
    PAGE_WAKEUP(m); \
    vm_page_lock_queues(); \
    vm_page_free(m); \
    vm_page_unlock_queues(); \
}

#define RELEASE_PAGE(m) { \
    PAGE_WAKEUP(m); \
    vm_page_lock_queues(); \
    vm_page_activate(m); \
    vm_page_unlock_queues(); \
}

#define UNLOCK_MAP { \
    if (lookup_still_valid) { \
        vm_map_lookup_done(map, entry); \
        lookup_still_valid = FALSE; \
    } \
}

#define UNLOCK_THINGS { \
    object->paging_in_progress--; \
    vm_object_unlock(object); \
    if (object != first_object) { \
        vm_object_lock(first_object); \
        FREE_PAGE(first_m); \
        first_object->paging_in_progress--; \
        vm_object_unlock(first_object); \
    } \
    UNLOCK_MAP; \
}

#define UNLOCK_AND_DEALLOCATE { \
    UNLOCK_THINGS; \
    vm_object_deallocate(first_object); \
}

RetryFault: ;

    /*
     * Find the backing store object and offset into
     * it to begin the search.
     */

    if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
            &first_object, &first_offset,
            &prot, &wired, &su)) != KERN_SUCCESS) {
        return(result);
    }
    lookup_still_valid = TRUE;

    if (wired)
        fault_type = prot;

    first_m = VM_PAGE_NULL;

    /*
     * Make a reference to this object to
     * prevent its disposal while we are messing with
     * it.  Once we have the reference, the map is free
     * to be diddled.  Since objects reference their
     * shadows (and copies), they will stay around as well.
     */

    vm_object_lock(first_object);

    first_object->ref_count++;
    first_object->paging_in_progress++;

    /*
     * INVARIANTS (through entire routine):
     *
     * 1)   At all times, we must either have the object
     *      lock or a busy page in some object to prevent
     *      some other thread from trying to bring in
     *      the same page.
     *
     *      Note that we cannot hold any locks during the
     *      pager access or when waiting for memory, so
     *      we use a busy page then.
     *
     *      Note also that we aren't as concerned about
     *      more than one thread attempting to pager_data_unlock
     *      the same page at once, so we don't hold the page
     *      as busy then, but do record the highest unlock
     *      value so far.  [Unlock requests may also be delivered
     *      out of order.]
     *
     * 2)   Once we have a busy page, we must remove it from
     *      the pageout queues, so that the pageout daemon
     *      will not grab it away.
     *
     * 3)   To prevent another thread from racing us down the
     *      shadow chain and entering a new page in the top
     *      object before we do, we must keep a busy page in
     *      the top object while following the shadow chain.
     *
     * 4)   We must increment paging_in_progress on any object
     *      for which we have a busy page, to prevent
     *      vm_object_collapse from removing the busy page
     *      without our noticing.
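     *
     * (The recovery-action macros above, FREE_PAGE, RELEASE_PAGE,
     * UNLOCK_MAP, UNLOCK_THINGS and UNLOCK_AND_DEALLOCATE, are what
     * back this state out on the error and retry paths.)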
     */

    /*
     * Search for the page at object/offset.
     */

    object = first_object;
    offset = first_offset;

    /*
     * See whether this page is resident
     */

    while (TRUE) {
        m = vm_page_lookup(object, offset);
        if (m != VM_PAGE_NULL) {
            /*
             * If the page is being brought in,
             * wait for it and then retry.
             */
            if (m->busy) {
#ifdef DOTHREADS
                int wait_result;

                PAGE_ASSERT_WAIT(m, !change_wiring);
                UNLOCK_THINGS;
                thread_block();
                wait_result = current_thread()->wait_result;
                vm_object_deallocate(first_object);
                if (wait_result != THREAD_AWAKENED)
                    return(KERN_SUCCESS);
                goto RetryFault;
#else
                PAGE_ASSERT_WAIT(m, !change_wiring);
                UNLOCK_THINGS;
                thread_block();
                vm_object_deallocate(first_object);
                goto RetryFault;
#endif
            }

            if (m->absent)
                panic("vm_fault: absent");

            /*
             * If the desired access to this page has
             * been locked out, request that it be unlocked.
             */

            if (fault_type & m->page_lock) {
#ifdef DOTHREADS
                int wait_result;

                if ((fault_type & m->unlock_request) != fault_type)
                    panic("vm_fault: pager_data_unlock");

                PAGE_ASSERT_WAIT(m, !change_wiring);
                UNLOCK_THINGS;
                thread_block();
                wait_result = current_thread()->wait_result;
                vm_object_deallocate(first_object);
                if (wait_result != THREAD_AWAKENED)
                    return(KERN_SUCCESS);
                goto RetryFault;
#else
                if ((fault_type & m->unlock_request) != fault_type)
                    panic("vm_fault: pager_data_unlock");

                PAGE_ASSERT_WAIT(m, !change_wiring);
                UNLOCK_THINGS;
                thread_block();
                vm_object_deallocate(first_object);
                goto RetryFault;
#endif
            }

            /*
             * Remove the page from the pageout daemon's
             * reach while we play with it.
             */

            vm_page_lock_queues();
            if (m->inactive) {
                queue_remove(&vm_page_queue_inactive, m,
                        vm_page_t, pageq);
                m->inactive = FALSE;
                vm_page_inactive_count--;
                vm_stat.reactivations++;
            }

            if (m->active) {
                queue_remove(&vm_page_queue_active, m,
                        vm_page_t, pageq);
                m->active = FALSE;
                vm_page_active_count--;
            }
            vm_page_unlock_queues();

            /*
             * Mark page busy for other threads.
             */
            m->busy = TRUE;
            m->absent = FALSE;
            break;
        }

        if (((object->pager != vm_pager_null) &&
                (!change_wiring || wired))
            || (object == first_object)) {

            /*
             * Allocate a new page for this object/offset
             * pair.
             */

            m = vm_page_alloc(object, offset);

            if (m == VM_PAGE_NULL) {
                UNLOCK_AND_DEALLOCATE;
                VM_WAIT;
                goto RetryFault;
            }
        }

        if ((object->pager != vm_pager_null) &&
                (!change_wiring || wired)) {
            int rv;

            /*
             * Now that we have a busy page, we can
             * release the object lock.
             */
            vm_object_unlock(object);

            /*
             * Call the pager to retrieve the data, if any,
             * after releasing the lock on the map.
             */
            UNLOCK_MAP;

            rv = vm_pager_get(object->pager, m, TRUE);
            if (rv == VM_PAGER_OK) {
                /*
                 * Found the page.
                 * Leave it busy while we play with it.
                 */
                vm_object_lock(object);

                /*
                 * Relookup in case pager changed page.
                 * Pager is responsible for disposition
                 * of old page if moved.
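                 * (vm_pager_get may have substituted a different
                 * vm_page for this object/offset, so we cannot
                 * trust our old pointer.)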
                 */
                m = vm_page_lookup(object, offset);

                vm_stat.pageins++;
                m->fake = FALSE;
                pmap_clear_modify(VM_PAGE_TO_PHYS(m));
                break;
            }

            /*
             * Remove the bogus page (which does not
             * exist at this object/offset); before
             * doing so, we must get back our object
             * lock to preserve our invariant.
             *
             * Also wake up any other thread that may want
             * to bring in this page.
             *
             * If this is the top-level object, we must
             * leave the busy page to prevent another
             * thread from rushing past us, and inserting
             * the page in that object at the same time
             * that we are.
             */

            vm_object_lock(object);
            /*
             * Data outside the range of the pager; an error
             */
            if (rv == VM_PAGER_BAD) {
                FREE_PAGE(m);
                UNLOCK_AND_DEALLOCATE;
                return(KERN_PROTECTION_FAILURE);        /* XXX */
            }
            if (object != first_object) {
                FREE_PAGE(m);
                /*
                 * XXX - we cannot just fall out at this
                 * point, m has been freed and is invalid!
                 */
            }
        }

        /*
         * We get here if the object has no pager (or unwiring)
         * or the pager doesn't have the page.
         */
        if (object == first_object)
            first_m = m;

        /*
         * Move on to the next object.  Lock the next
         * object before unlocking the current one.
         */

        offset += object->shadow_offset;
        next_object = object->shadow;
        if (next_object == VM_OBJECT_NULL) {
            /*
             * If there's no object left, fill the page
             * in the top object with zeros.
             */
            if (object != first_object) {
                object->paging_in_progress--;
                vm_object_unlock(object);

                object = first_object;
                offset = first_offset;
                m = first_m;
                vm_object_lock(object);
            }
            first_m = VM_PAGE_NULL;

            vm_page_zero_fill(m);
            vm_stat.zero_fill_count++;
            m->fake = FALSE;
            m->absent = FALSE;
            break;
        }
        else {
            vm_object_lock(next_object);
            if (object != first_object)
                object->paging_in_progress--;
            vm_object_unlock(object);
            object = next_object;
            object->paging_in_progress++;
        }
    }

    if (m->absent || m->active || m->inactive || !m->busy)
        panic("vm_fault: absent or active or inactive or not busy after main loop");

    /*
     * PAGE HAS BEEN FOUND.
     * [Loop invariant still holds -- the object lock
     * is held.]
     */

    old_m = m;  /* save page that would be copied */

    /*
     * If the page is being written, but isn't
     * already owned by the top-level object,
     * we have to copy it into a new page owned
     * by the top-level object.
     */

    if (object != first_object) {
        /*
         * We only really need to copy if we
         * want to write it.
         */

        if (fault_type & VM_PROT_WRITE) {

            /*
             * If we try to collapse first_object at this
             * point, we may deadlock when we try to get
             * the lock on an intermediate object (since we
             * have the bottom object locked).  We can't
             * unlock the bottom object, because the page
             * we found may move (by collapse) if we do.
             *
             * Instead, we first copy the page.  Then, when
             * we have no more use for the bottom object,
             * we unlock it and try to collapse.
             *
             * Note that we copy the page even if we didn't
             * need to... that's the breaks.
             */

            /*
             * We already have an empty page in
             * first_object - use it.
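             * (It is the blank page allocated on the first
             * trip around the loop, while object was still
             * first_object, and saved in first_m before we
             * walked down the shadow chain.)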
             */

            vm_page_copy(m, first_m);
            first_m->fake = FALSE;
            first_m->absent = FALSE;

            /*
             * If another map is truly sharing this
             * page with us, we have to flush all
             * uses of the original page, since we
             * can't distinguish those which want the
             * original from those which need the
             * new copy.
             */

            vm_page_lock_queues();
            if (!su) {
                /*
                 * Also, once it's no longer in
                 * use by any maps, move it to
                 * the inactive queue instead.
                 */

                vm_page_deactivate(m);
                pmap_remove_all(VM_PAGE_TO_PHYS(m));
            }
            else {
                /*
                 * Old page is only (possibly)
                 * in use by faulting map.  We
                 * should do a pmap_remove on
                 * that mapping, but we know
                 * that pmap_enter will remove
                 * the old mapping before
                 * inserting the new one.
                 */
                vm_page_activate(m);
            }
            vm_page_unlock_queues();

            /*
             * We no longer need the old page or object.
             */
            PAGE_WAKEUP(m);
            object->paging_in_progress--;
            vm_object_unlock(object);

            /*
             * Only use the new page below...
             */

            vm_stat.cow_faults++;
            m = first_m;
            object = first_object;
            offset = first_offset;

            /*
             * Now that we've gotten the copy out of the
             * way, let's try to collapse the top object.
             */
            vm_object_lock(object);
            /*
             * But we have to play ugly games with
             * paging_in_progress to do that...
             */
            object->paging_in_progress--;
            vm_object_collapse(object);
            object->paging_in_progress++;
        }
        else {
            prot &= (~VM_PROT_WRITE);
            m->copy_on_write = TRUE;
        }
    }

    if (m->active || m->inactive)
        panic("vm_fault: active or inactive before copy object handling");

    /*
     * If the page is being written, but hasn't been
     * copied to the copy-object, we have to copy it there.
     */
RetryCopy:
    if (first_object->copy != VM_OBJECT_NULL) {
        vm_object_t copy_object = first_object->copy;
        vm_offset_t copy_offset;
        vm_page_t   copy_m;

        /*
         * We only need to copy if we want to write it.
         */
        if ((fault_type & VM_PROT_WRITE) == 0) {
            prot &= ~VM_PROT_WRITE;
            m->copy_on_write = TRUE;
        }
        else {
            /*
             * Try to get the lock on the copy_object.
             */
            if (!vm_object_lock_try(copy_object)) {
                vm_object_unlock(object);
                /* should spin a bit here... */
                vm_object_lock(object);
                goto RetryCopy;
            }

            /*
             * Make another reference to the copy-object,
             * to keep it from disappearing during the
             * copy.
             */
            copy_object->ref_count++;

            /*
             * Does the page exist in the copy?
             */
            copy_offset = first_offset
                    - copy_object->shadow_offset;
            copy_m = vm_page_lookup(copy_object, copy_offset);
            if (page_exists = (copy_m != VM_PAGE_NULL)) {
                if (copy_m->busy) {
#ifdef DOTHREADS
                    int wait_result;

                    /*
                     * If the page is being brought
                     * in, wait for it and then retry.
                     */
                    PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                    RELEASE_PAGE(m);
                    copy_object->ref_count--;
                    vm_object_unlock(copy_object);
                    UNLOCK_THINGS;
                    thread_block();
                    wait_result = current_thread()->wait_result;
                    vm_object_deallocate(first_object);
                    if (wait_result != THREAD_AWAKENED)
                        return(KERN_SUCCESS);
                    goto RetryFault;
#else
                    /*
                     * If the page is being brought
                     * in, wait for it and then retry.
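                     * (Without DOTHREADS there is no
                     * wait_result to examine after
                     * thread_block(), so we always
                     * retry the fault from the top.)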
                     */
                    PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                    RELEASE_PAGE(m);
                    copy_object->ref_count--;
                    vm_object_unlock(copy_object);
                    UNLOCK_THINGS;
                    thread_block();
                    vm_object_deallocate(first_object);
                    goto RetryFault;
#endif
                }
            }

            /*
             * If the page is not in memory (in the object)
             * and the object has a pager, we have to check
             * if the pager has the data in secondary
             * storage.
             */
            if (!page_exists) {

                /*
                 * If we don't allocate a (blank) page
                 * here... another thread could try
                 * to page it in, allocate a page, and
                 * then block on the busy page in its
                 * shadow (first_object).  Then we'd
                 * trip over the busy page after we
                 * found that the copy_object's pager
                 * doesn't have the page...
                 */
                copy_m = vm_page_alloc(copy_object,
                            copy_offset);
                if (copy_m == VM_PAGE_NULL) {
                    /*
                     * Wait for a page, then retry.
                     */
                    RELEASE_PAGE(m);
                    copy_object->ref_count--;
                    vm_object_unlock(copy_object);
                    UNLOCK_AND_DEALLOCATE;
                    VM_WAIT;
                    goto RetryFault;
                }

                if (copy_object->pager != vm_pager_null) {
                    vm_object_unlock(object);
                    vm_object_unlock(copy_object);
                    UNLOCK_MAP;

                    page_exists = vm_pager_has_page(
                            copy_object->pager,
                            (copy_offset + copy_object->paging_offset));

                    vm_object_lock(copy_object);

                    /*
                     * Since the map is unlocked, someone
                     * else could have copied this object
                     * and put a different copy_object
                     * between the two.  Or, the last
                     * reference to the copy-object (other
                     * than the one we have) may have
                     * disappeared - if that has happened,
                     * we don't need to make the copy.
                     */
                    if (copy_object->shadow != object ||
                        copy_object->ref_count == 1) {
                        /*
                         * Gaah... start over!
                         */
                        FREE_PAGE(copy_m);
                        vm_object_unlock(copy_object);
                        vm_object_deallocate(copy_object);
                        /* may block */
                        vm_object_lock(object);
                        goto RetryCopy;
                    }
                    vm_object_lock(object);

                    if (page_exists) {
                        /*
                         * We didn't need the page
                         */
                        FREE_PAGE(copy_m);
                    }
                }
            }
            if (!page_exists) {
                /*
                 * Must copy page into copy-object.
                 */
                vm_page_copy(m, copy_m);
                copy_m->fake = FALSE;
                copy_m->absent = FALSE;

                /*
                 * Things to remember:
                 * 1. The copied page must be marked 'dirty'
                 *    so it will be paged out to the copy
                 *    object.
                 * 2. If the old page was in use by any users
                 *    of the copy-object, it must be removed
                 *    from all pmaps.  (We can't know which
                 *    pmaps use it.)
                 */
                vm_page_lock_queues();
                pmap_remove_all(VM_PAGE_TO_PHYS(old_m));
                copy_m->clean = FALSE;
                vm_page_activate(copy_m);       /* XXX */
                vm_page_unlock_queues();

                PAGE_WAKEUP(copy_m);
            }

            /*
             * The reference count on copy_object must be
             * at least 2:  one for our extra reference,
             * and at least one from the outside world
             * (we checked that when we last locked
             * copy_object).
             */
            copy_object->ref_count--;
            vm_object_unlock(copy_object);
            m->copy_on_write = FALSE;
        }
    }

    if (m->active || m->inactive)
        panic("vm_fault: active or inactive before retrying lookup");

    /*
     * We must verify that the maps have not changed
     * since our last lookup.
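     * The map was unlocked (UNLOCK_MAP) before any pager I/O
     * above, so another thread may have changed it while we
     * were waiting.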
     */

    if (!lookup_still_valid) {
        vm_object_t retry_object;
        vm_offset_t retry_offset;
        vm_prot_t   retry_prot;

        /*
         * Since map entries may be pageable, make sure we can
         * take a page fault on them.
         */
        vm_object_unlock(object);

        /*
         * To avoid trying to write_lock the map while another
         * thread has it read_locked (in vm_map_pageable), we
         * do not try for write permission.  If the page is
         * still writable, we will get write permission.  If it
         * is not, or has been marked needs_copy, we enter the
         * mapping without write permission, and will merely
         * take another fault.
         */
        result = vm_map_lookup(&map, vaddr,
                fault_type & ~VM_PROT_WRITE, &entry,
                &retry_object, &retry_offset, &retry_prot,
                &wired, &su);

        vm_object_lock(object);

        /*
         * If we don't need the page any longer, put it on the
         * active list (the easiest thing to do here).  If no
         * one needs it, pageout will grab it eventually.
         */

        if (result != KERN_SUCCESS) {
            RELEASE_PAGE(m);
            UNLOCK_AND_DEALLOCATE;
            return(result);
        }

        lookup_still_valid = TRUE;

        if ((retry_object != first_object) ||
            (retry_offset != first_offset)) {
            RELEASE_PAGE(m);
            UNLOCK_AND_DEALLOCATE;
            goto RetryFault;
        }

        /*
         * Check whether the protection has changed or the object
         * has been copied while we left the map unlocked.
         * Changing from read to write permission is OK - we leave
         * the page write-protected, and catch the write fault.
         * Changing from write to read permission means that we
         * can't mark the page write-enabled after all.
         */
        prot &= retry_prot;
        if (m->copy_on_write)
            prot &= ~VM_PROT_WRITE;
    }

    /*
     * (the various bits we're fiddling with here are locked by
     * the object's lock)
     */

    /* XXX This distorts the meaning of the copy_on_write bit */

    if (prot & VM_PROT_WRITE)
        m->copy_on_write = FALSE;

    /*
     * It's critically important that a wired-down page be faulted
     * only once in each map for which it is wired.
     */

    if (m->active || m->inactive)
        panic("vm_fault: active or inactive before pmap_enter");

    vm_object_unlock(object);

    /*
     * Put this page into the physical map.
     * We had to do the unlock above because pmap_enter
     * may cause other faults.  We don't put the
     * page back on the active queue until later so
     * that the page-out daemon won't find us (yet).
     */

    pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
            prot & ~(m->page_lock), wired);

    /*
     * If the page is not wired down, then put it where the
     * pageout daemon can find it.
     */
    vm_object_lock(object);
    vm_page_lock_queues();
    if (change_wiring) {
        if (wired)
            vm_page_wire(m);
        else
            vm_page_unwire(m);
    }
    else
        vm_page_activate(m);
    vm_page_unlock_queues();

    /*
     * Unlock everything, and return
     */

    PAGE_WAKEUP(m);
    UNLOCK_AND_DEALLOCATE;

    return(KERN_SUCCESS);
}

/*
 *      vm_fault_wire:
 *
 *      Wire down a range of virtual addresses in a map.
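 *
 *      The range [start, end) is handled one page at a time by
 *      simulating faults; callers (vm_map_pageable, typically) are
 *      expected to pass page-aligned addresses.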
 */
void vm_fault_wire(map, start, end)
    vm_map_t    map;
    vm_offset_t start, end;
{
    register vm_offset_t    va;
    register pmap_t         pmap;

    pmap = vm_map_pmap(map);

    /*
     * Inform the physical mapping system that the
     * range of addresses may not fault, so that
     * page tables and such can be locked down as well.
     */

    pmap_pageable(pmap, start, end, FALSE);

    /*
     * We simulate a fault to get the page and enter it
     * in the physical map.
     */

    for (va = start; va < end; va += PAGE_SIZE) {
        (void) vm_fault(map, va, VM_PROT_NONE, TRUE);
    }
}

/*
 *      vm_fault_unwire:
 *
 *      Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
    vm_map_t    map;
    vm_offset_t start, end;
{
    register vm_offset_t    va, pa;
    register pmap_t         pmap;

    pmap = vm_map_pmap(map);

    /*
     * Since the pages are wired down, we must be able to
     * get their mappings from the physical map system.
     */

    vm_page_lock_queues();

    for (va = start; va < end; va += PAGE_SIZE) {
        pa = pmap_extract(pmap, va);
        if (pa == (vm_offset_t) 0) {
            panic("unwire: page not in pmap");
        }
        pmap_change_wiring(pmap, va, FALSE);
        vm_page_unwire(PHYS_TO_VM_PAGE(pa));
    }
    vm_page_unlock_queues();

    /*
     * Inform the physical mapping system that the range
     * of addresses may fault, so that page tables and
     * such may be unwired themselves.
     */

    pmap_pageable(pmap, start, end, TRUE);
}

/*
 *      Routine:
 *              vm_fault_copy_entry
 *      Function:
 *              Copy all of the pages from a wired-down map entry to another.
 *
 *      In/out conditions:
 *              The source and destination maps must be locked for write.
 *              The source map entry must be wired down (or be a sharing map
 *              entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
    vm_map_t        dst_map;
    vm_map_t        src_map;
    vm_map_entry_t  dst_entry;
    vm_map_entry_t  src_entry;
{
    vm_object_t dst_object;
    vm_object_t src_object;
    vm_offset_t dst_offset;
    vm_offset_t src_offset;
    vm_prot_t   prot;
    vm_offset_t vaddr;
    vm_page_t   dst_m;
    vm_page_t   src_m;

#ifdef lint
    src_map++;
#endif lint

    src_object = src_entry->object.vm_object;
    src_offset = src_entry->offset;

    /*
     * Create the top-level object for the destination entry.
     * (Doesn't actually shadow anything - we copy the pages
     * directly.)
     */
    dst_object = vm_object_allocate(
            (vm_size_t) (dst_entry->end - dst_entry->start));

    dst_entry->object.vm_object = dst_object;
    dst_entry->offset = 0;

    prot = dst_entry->max_protection;

    /*
     * Loop through all of the pages in the entry's range, copying
     * each one from the source object (it should be there) to the
     * destination object.
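     * The offsets stay in step: dst_offset into dst_object and
     * (dst_offset + src_offset) into src_object, since the new
     * destination object starts at offset 0.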
     */
    for (vaddr = dst_entry->start, dst_offset = 0;
         vaddr < dst_entry->end;
         vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

        /*
         * Allocate a page in the destination object
         */
        vm_object_lock(dst_object);
        do {
            dst_m = vm_page_alloc(dst_object, dst_offset);
            if (dst_m == VM_PAGE_NULL) {
                vm_object_unlock(dst_object);
                VM_WAIT;
                vm_object_lock(dst_object);
            }
        } while (dst_m == VM_PAGE_NULL);

        /*
         * Find the page in the source object, and copy it in.
         * (Because the source is wired down, the page will be
         * in memory.)
         */
        vm_object_lock(src_object);
        src_m = vm_page_lookup(src_object, dst_offset + src_offset);
        if (src_m == VM_PAGE_NULL)
            panic("vm_fault_copy_wired: page missing");

        vm_page_copy(src_m, dst_m);

        /*
         * Enter it in the pmap...
         */
        vm_object_unlock(src_object);
        vm_object_unlock(dst_object);

        pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
                prot, FALSE);

        /*
         * Mark it no longer busy, and put it on the active list.
         */
        vm_object_lock(dst_object);
        vm_page_lock_queues();
        vm_page_activate(dst_m);
        vm_page_unlock_queues();
        PAGE_WAKEUP(dst_m);
        vm_object_unlock(dst_object);
    }
}