Lines Matching defs:fs (FreeBSD sys/vm/vm_fault.c; an illustrative sketch of the faultstate teardown pattern follows the listing)
176 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
178 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
243 vm_fault_unlock_map(struct faultstate *fs)
246 if (fs->lookup_still_valid) {
247 vm_map_lookup_done(fs->map, fs->entry);
248 fs->lookup_still_valid = false;
253 vm_fault_unlock_vp(struct faultstate *fs)
256 if (fs->vp != NULL) {
257 vput(fs->vp);
258 fs->vp = NULL;
263 vm_fault_deallocate(struct faultstate *fs)
266 vm_fault_page_release(&fs->m_cow);
267 vm_fault_page_release(&fs->m);
268 vm_object_pip_wakeup(fs->object);
269 if (fs->object != fs->first_object) {
270 VM_OBJECT_WLOCK(fs->first_object);
271 vm_fault_page_free(&fs->first_m);
272 VM_OBJECT_WUNLOCK(fs->first_object);
273 vm_object_pip_wakeup(fs->first_object);
275 vm_object_deallocate(fs->first_object);
276 vm_fault_unlock_map(fs);
277 vm_fault_unlock_vp(fs);
281 vm_fault_unlock_and_deallocate(struct faultstate *fs)
284 VM_OBJECT_UNLOCK(fs->object);
285 vm_fault_deallocate(fs);
289 vm_fault_dirty(struct faultstate *fs, vm_page_t m)
293 if (((fs->prot & VM_PROT_WRITE) == 0 &&
294 (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
300 need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
301 (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
302 (fs->fault_flags & VM_FAULT_DIRTY) != 0;
323 if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
332 * Unlocks fs.first_object and fs.map on success.
335 vm_fault_soft_fast(struct faultstate *fs)
345 MPASS(fs->vp == NULL);
354 m = vm_page_lookup_unlocked(fs->first_object, fs->first_pindex);
356 ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m))) {
357 VM_OBJECT_WLOCK(fs->first_object);
361 vaddr = fs->vaddr;
363 VM_OBJECT_RLOCK(fs->first_object);
370 if (m->object != fs->first_object || m->pindex != fs->first_pindex)
373 vm_object_busy(fs->first_object);
376 ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)))
388 if ((fs->prot & VM_PROT_WRITE) != 0) {
396 if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
399 while (rounddown2(vaddr, pagesizes[psind]) < fs->entry->start ||
400 roundup2(vaddr + 1, pagesizes[psind]) > fs->entry->end ||
404 !pmap_ps_enabled(fs->map->pmap)) {
419 fs->fault_type |= VM_PROT_WRITE;
423 if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
424 PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
427 if (fs->m_hold != NULL) {
428 (*fs->m_hold) = m;
431 if (psind == 0 && !fs->wired)
432 vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
433 VM_OBJECT_RUNLOCK(fs->first_object);
434 vm_fault_dirty(fs, m);
435 vm_object_unbusy(fs->first_object);
436 vm_map_lookup_done(fs->map, fs->entry);
440 vm_object_unbusy(fs->first_object);
442 if (!VM_OBJECT_TRYUPGRADE(fs->first_object)) {
443 VM_OBJECT_RUNLOCK(fs->first_object);
444 VM_OBJECT_WLOCK(fs->first_object);
450 vm_fault_restore_map_lock(struct faultstate *fs)
453 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
454 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
456 if (!vm_map_trylock_read(fs->map)) {
457 VM_OBJECT_WUNLOCK(fs->first_object);
458 vm_map_lock_read(fs->map);
459 VM_OBJECT_WLOCK(fs->first_object);
461 fs->lookup_still_valid = true;
498 vm_fault_populate(struct faultstate *fs)
506 MPASS(fs->object == fs->first_object);
507 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
508 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
509 MPASS(fs->first_object->backing_object == NULL);
510 MPASS(fs->lookup_still_valid);
512 pager_first = OFF_TO_IDX(fs->entry->offset);
513 pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
514 vm_fault_unlock_map(fs);
515 vm_fault_unlock_vp(fs);
527 rv = vm_pager_populate(fs->first_object, fs->first_pindex,
528 fs->fault_type, fs->entry->max_protection, &pager_first,
531 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
537 vm_fault_restore_map_lock(fs);
538 if (fs->map->timestamp != fs->map_generation)
547 MPASS(fs->first_pindex <= pager_last);
548 MPASS(fs->first_pindex >= pager_first);
549 MPASS(pager_last < fs->first_object->size);
551 vm_fault_restore_map_lock(fs);
552 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(fs->entry);
553 if (fs->map->timestamp != fs->map_generation) {
555 vm_fault_populate_cleanup(fs->first_object, pager_first,
558 m = vm_page_lookup(fs->first_object, pager_first);
559 if (m != fs->m)
574 m = vm_page_lookup(fs->first_object, pager_first);
576 VM_OBJECT_WUNLOCK(fs->first_object);
577 vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
578 fs->entry->offset;
582 (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
583 (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
587 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
588 fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
590 VM_OBJECT_WLOCK(fs->first_object);
596 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
600 if (fs->m_hold != NULL) {
601 *fs->m_hold = m + (fs->first_pindex - pager_first);
602 vm_page_wire(*fs->m_hold);
614 map_first = OFF_TO_IDX(fs->entry->offset);
616 vm_fault_populate_cleanup(fs->first_object, pager_first,
620 map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
622 vm_fault_populate_cleanup(fs->first_object, map_last + 1,
626 for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
629 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
635 !pmap_ps_enabled(fs->map->pmap)))
641 vm_fault_dirty(fs, &m[i]);
643 VM_OBJECT_WUNLOCK(fs->first_object);
644 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
645 (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
658 MPASS(!fs->wired);
660 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
661 &m[i], fs->prot, fs->fault_type, 0);
666 VM_OBJECT_WLOCK(fs->first_object);
668 if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
669 m[i].pindex == fs->first_pindex)
673 if (fs->m_hold != NULL &&
674 m[i].pindex == fs->first_pindex) {
675 (*fs->m_hold) = &m[i];
787 vm_fault_object_ensure_wlocked(struct faultstate *fs)
789 if (fs->object == fs->first_object)
790 VM_OBJECT_ASSERT_WLOCKED(fs->object);
792 if (!fs->can_read_lock) {
793 VM_OBJECT_ASSERT_WLOCKED(fs->object);
797 if (VM_OBJECT_WOWNED(fs->object))
800 if (VM_OBJECT_TRYUPGRADE(fs->object))
807 vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
812 if (fs->object->type != OBJT_VNODE)
814 vp = fs->object->handle;
815 if (vp == fs->vp) {
824 vm_fault_unlock_vp(fs);
838 fs->vp = vp;
844 vm_fault_unlock_and_deallocate(fs);
846 vm_fault_deallocate(fs);
849 fs->vp = vp;
860 vm_fault_readahead(struct faultstate *fs)
865 KASSERT(fs->lookup_still_valid, ("map unlocked"));
866 era = fs->entry->read_ahead;
867 behavior = vm_map_entry_behavior(fs->entry);
872 if (fs->vaddr == fs->entry->next_read)
873 vm_fault_dontneed(fs, fs->vaddr, nera);
874 } else if (fs->vaddr == fs->entry->next_read) {
889 vm_fault_dontneed(fs, fs->vaddr, nera);
901 fs->entry->read_ahead = nera;
908 vm_fault_lookup(struct faultstate *fs)
912 KASSERT(!fs->lookup_still_valid,
914 result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
915 VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
916 &fs->first_pindex, &fs->prot, &fs->wired);
918 vm_fault_unlock_vp(fs);
922 fs->map_generation = fs->map->timestamp;
924 if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
926 __func__, (u_long)fs->vaddr);
929 if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
930 fs->entry->wiring_thread != curthread) {
931 vm_map_unlock_read(fs->map);
932 vm_map_lock(fs->map);
933 if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
934 (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
935 vm_fault_unlock_vp(fs);
936 fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
937 vm_map_unlock_and_wait(fs->map, 0);
939 vm_map_unlock(fs->map);
943 MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);
945 if (fs->wired)
946 fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
948 KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
949 ("!fs->wired && VM_FAULT_WIRE"));
950 fs->lookup_still_valid = true;
956 vm_fault_relookup(struct faultstate *fs)
963 if (!vm_map_trylock_read(fs->map))
966 fs->lookup_still_valid = true;
967 if (fs->map->timestamp == fs->map_generation)
970 result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
971 &fs->entry, &retry_object, &retry_pindex, &retry_prot,
972 &fs->wired);
982 if (retry_object != fs->first_object ||
983 retry_pindex != fs->first_pindex)
994 fs->prot &= retry_prot;
995 fs->fault_type &= retry_prot;
996 if (fs->prot == 0)
1000 KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
1007 vm_fault_cow(struct faultstate *fs)
1011 KASSERT(fs->object != fs->first_object,
1027 fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
1031 fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
1035 (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
1036 fs->object == fs->first_object->backing_object &&
1037 VM_OBJECT_TRYWLOCK(fs->object)) {
1039 * Remove but keep xbusy for replace. fs->m is moved into
1040 * fs->first_object and left busy while fs->first_m is
1043 vm_page_remove_xbusy(fs->m);
1044 vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
1045 fs->first_m);
1046 vm_page_dirty(fs->m);
1051 vm_reserv_rename(fs->m, fs->first_object, fs->object,
1052 OFF_TO_IDX(fs->first_object->backing_object_offset));
1054 VM_OBJECT_WUNLOCK(fs->object);
1055 VM_OBJECT_WUNLOCK(fs->first_object);
1056 fs->first_m = fs->m;
1057 fs->m = NULL;
1061 VM_OBJECT_WUNLOCK(fs->first_object);
1065 pmap_copy_page(fs->m, fs->first_m);
1066 vm_page_valid(fs->first_m);
1067 if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
1068 vm_page_wire(fs->first_m);
1069 vm_page_unwire(fs->m, PQ_INACTIVE);
1075 fs->m_cow = fs->m;
1076 fs->m = NULL;
1095 vm_page_assert_xbusied(fs->m_cow);
1096 if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
1097 pmap_remove_all(fs->m_cow);
1100 vm_object_pip_wakeup(fs->object);
1105 fs->object = fs->first_object;
1106 fs->pindex = fs->first_pindex;
1107 fs->m = fs->first_m;
1113 vm_fault_next(struct faultstate *fs)
1117 if (fs->object == fs->first_object || !fs->can_read_lock)
1118 VM_OBJECT_ASSERT_WLOCKED(fs->object);
1120 VM_OBJECT_ASSERT_LOCKED(fs->object);
1132 if (fs->object == fs->first_object) {
1133 fs->first_m = fs->m;
1134 fs->m = NULL;
1135 } else if (fs->m != NULL) {
1136 if (!vm_fault_object_ensure_wlocked(fs)) {
1137 fs->can_read_lock = false;
1138 vm_fault_unlock_and_deallocate(fs);
1141 vm_fault_page_free(&fs->m);
1148 next_object = fs->object->backing_object;
1151 MPASS(fs->first_m != NULL);
1152 KASSERT(fs->object != next_object, ("object loop %p", next_object));
1153 if (fs->can_read_lock)
1158 if (fs->object != fs->first_object)
1159 vm_object_pip_wakeup(fs->object);
1160 fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1161 VM_OBJECT_UNLOCK(fs->object);
1162 fs->object = next_object;
1168 vm_fault_zerofill(struct faultstate *fs)
1175 if (fs->object != fs->first_object) {
1176 vm_object_pip_wakeup(fs->object);
1177 fs->object = fs->first_object;
1178 fs->pindex = fs->first_pindex;
1180 MPASS(fs->first_m != NULL);
1181 MPASS(fs->m == NULL);
1182 fs->m = fs->first_m;
1183 fs->first_m = NULL;
1188 if ((fs->m->flags & PG_ZERO) == 0) {
1189 pmap_zero_page(fs->m);
1194 vm_page_valid(fs->m);
1202 vm_fault_allocate_oom(struct faultstate *fs)
1206 vm_fault_unlock_and_deallocate(fs);
1209 if (!fs->oom_started) {
1210 fs->oom_started = true;
1211 getmicrotime(&fs->oom_start_time);
1216 timevalsub(&now, &fs->oom_start_time);
1225 fs->oom_started = false;
1233 vm_fault_allocate(struct faultstate *fs)
1238 if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
1239 res = vm_fault_lock_vnode(fs, true);
1245 if (fs->pindex >= fs->object->size) {
1246 vm_fault_unlock_and_deallocate(fs);
1250 if (fs->object == fs->first_object &&
1251 (fs->first_object->flags & OBJ_POPULATE) != 0 &&
1252 fs->first_object->shadow_count == 0) {
1253 res = vm_fault_populate(fs);
1258 vm_fault_unlock_and_deallocate(fs);
1283 dset = fs->object->domain.dr_policy;
1288 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
1290 if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
1291 vm_fault_unlock_and_deallocate(fs);
1294 fs->m = vm_page_alloc(fs->object, fs->pindex,
1297 if (fs->m == NULL) {
1298 if (vm_fault_allocate_oom(fs))
1302 fs->oom_started = false;
1313 vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
1328 e_start = fs->entry->start;
1329 e_end = fs->entry->end;
1330 behavior = vm_map_entry_behavior(fs->entry);
1343 if (fs->nera == -1 && !P_KILLED(curproc))
1344 fs->nera = vm_fault_readahead(fs);
1352 vm_fault_unlock_map(fs);
1354 status = vm_fault_lock_vnode(fs, false);
1358 KASSERT(fs->vp == NULL || !vm_map_is_system(fs->map),
1365 if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
1371 if (fs->nera > 0) {
1373 ahead = fs->nera;
1385 cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
1387 atop(fs->vaddr - e_start));
1390 ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
1394 rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
1406 VM_OBJECT_WLOCK(fs->object);
1407 vm_fault_page_free(&fs->m);
1408 vm_fault_unlock_and_deallocate(fs);
1430 vm_fault_busy_sleep(struct faultstate *fs)
1437 vm_page_aflag_set(fs->m, PGA_REFERENCED);
1438 if (fs->object != fs->first_object) {
1439 vm_fault_page_release(&fs->first_m);
1440 vm_object_pip_wakeup(fs->first_object);
1442 vm_object_pip_wakeup(fs->object);
1443 vm_fault_unlock_map(fs);
1444 if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
1445 !vm_page_busy_sleep(fs->m, "vmpfw", 0))
1446 VM_OBJECT_UNLOCK(fs->object);
1448 vm_object_deallocate(fs->first_object);
1460 vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
1465 if (fs->object == fs->first_object || !fs->can_read_lock)
1466 VM_OBJECT_ASSERT_WLOCKED(fs->object);
1468 VM_OBJECT_ASSERT_LOCKED(fs->object);
1475 if ((fs->object->flags & OBJ_DEAD) != 0) {
1476 dead = fs->object->type == OBJT_DEAD;
1477 vm_fault_unlock_and_deallocate(fs);
1487 fs->m = vm_page_lookup(fs->object, fs->pindex);
1488 if (fs->m != NULL) {
1489 if (!vm_page_tryxbusy(fs->m)) {
1490 vm_fault_busy_sleep(fs);
1499 if (vm_page_all_valid(fs->m)) {
1500 VM_OBJECT_UNLOCK(fs->object);
1510 if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
1511 fs->object == fs->first_object)) {
1512 if (!vm_fault_object_ensure_wlocked(fs)) {
1513 fs->can_read_lock = false;
1514 vm_fault_unlock_and_deallocate(fs);
1517 res = vm_fault_allocate(fs);
1527 if (vm_fault_object_needs_getpages(fs->object)) {
1538 VM_OBJECT_UNLOCK(fs->object);
1539 res = vm_fault_getpages(fs, behindp, aheadp);
1541 VM_OBJECT_WLOCK(fs->object);
1552 struct faultstate fs;
1563 fs.vp = NULL;
1564 fs.vaddr = vaddr;
1565 fs.m_hold = m_hold;
1566 fs.fault_flags = fault_flags;
1567 fs.map = map;
1568 fs.lookup_still_valid = false;
1569 fs.oom_started = false;
1570 fs.nera = -1;
1571 fs.can_read_lock = true;
1576 fs.fault_type = fault_type;
1582 rv = vm_fault_lookup(&fs);
1596 if (fs.vp == NULL /* avoid locked vnode leak */ &&
1597 (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
1598 (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
1599 res = vm_fault_soft_fast(&fs);
1601 VM_OBJECT_ASSERT_UNLOCKED(fs.first_object);
1604 VM_OBJECT_ASSERT_WLOCKED(fs.first_object);
1606 VM_OBJECT_WLOCK(fs.first_object);
1618 vm_object_reference_locked(fs.first_object);
1619 vm_object_pip_add(fs.first_object, 1);
1621 fs.m_cow = fs.m = fs.first_m = NULL;
1626 fs.object = fs.first_object;
1627 fs.pindex = fs.first_pindex;
1629 if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
1630 res = vm_fault_allocate(&fs);
1648 KASSERT(fs.m == NULL,
1649 ("page still set %p at loop start", fs.m));
1651 res = vm_fault_object(&fs, &behind, &ahead);
1680 res_next = vm_fault_next(&fs);
1686 if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
1687 if (fs.first_object == fs.object)
1688 vm_fault_page_free(&fs.first_m);
1689 vm_fault_unlock_and_deallocate(&fs);
1692 VM_OBJECT_UNLOCK(fs.object);
1693 vm_fault_zerofill(&fs);
1704 vm_page_assert_xbusied(fs.m);
1705 VM_OBJECT_ASSERT_UNLOCKED(fs.object);
1712 if (fs.object != fs.first_object) {
1716 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1717 vm_fault_cow(&fs);
1728 fs.prot &= ~VM_PROT_WRITE;
1736 if (!fs.lookup_still_valid) {
1737 rv = vm_fault_relookup(&fs);
1739 vm_fault_deallocate(&fs);
1745 VM_OBJECT_ASSERT_UNLOCKED(fs.object);
1754 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
1760 vm_page_assert_xbusied(fs.m);
1761 KASSERT(vm_page_all_valid(fs.m),
1762 ("vm_fault: page %p partially invalid", fs.m));
1764 vm_fault_dirty(&fs, fs.m);
1772 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
1773 fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
1774 if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
1775 fs.wired == 0)
1776 vm_fault_prefault(&fs, vaddr,
1784 if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
1785 vm_page_wire(fs.m);
1787 vm_page_activate(fs.m);
1788 if (fs.m_hold != NULL) {
1789 (*fs.m_hold) = fs.m;
1790 vm_page_wire(fs.m);
1792 vm_page_xunbusy(fs.m);
1793 fs.m = NULL;
1798 vm_fault_deallocate(&fs);
1803 if (racct_enable && fs.object->type == OBJT_VNODE) {
1805 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1831 * When "fs->first_object" is a shadow object, the pages in the backing object
1836 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
1845 VM_OBJECT_ASSERT_UNLOCKED(fs->object);
1846 first_object = fs->first_object;
1855 (entry = fs->entry)->start < end) {
1860 pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
1900 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
1912 pmap = fs->map->pmap;
1916 entry = fs->entry;
1931 if ((fs->prot & VM_PROT_WRITE) != 0)
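The teardown helpers near the top of the listing (vm_fault_unlock_map, vm_fault_unlock_vp, vm_fault_deallocate, vm_fault_unlock_and_deallocate) show the pattern that the rest of the matches rely on: every piece of transient fault state lives in one struct faultstate, and each error path can unwind with a single call that releases resources in a fixed order (object references, then the map lookup, then the vnode). The sketch below is a minimal, self-contained userspace analogue of that pattern, assuming invented names (fault_state, fault_unlock_map, fault_deallocate, and so on); it is not the FreeBSD implementation, only an illustration of the shape visible in the listing.

/*
 * Illustrative sketch only: a simplified analogue of the faultstate
 * teardown pattern.  All names and fields here are invented for the
 * example and are not the FreeBSD kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fault_state {
	void *object;             /* stand-in for a referenced VM object */
	void *vp;                 /* stand-in for a held vnode */
	bool  lookup_still_valid; /* stand-in for the held map lookup */
};

static void
fault_unlock_map(struct fault_state *fs)
{
	/* Mirrors vm_fault_unlock_map(): drop the lookup only if held. */
	if (fs->lookup_still_valid) {
		printf("releasing map lookup\n");
		fs->lookup_still_valid = false;
	}
}

static void
fault_unlock_vp(struct fault_state *fs)
{
	/* Mirrors vm_fault_unlock_vp(): release and clear the vnode. */
	if (fs->vp != NULL) {
		printf("releasing vnode\n");
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct fault_state *fs)
{
	/*
	 * Mirrors the ordering in vm_fault_deallocate(): drop object
	 * references first, then the map lookup, then the vnode.
	 */
	if (fs->object != NULL) {
		printf("dropping object reference\n");
		fs->object = NULL;
	}
	fault_unlock_map(fs);
	fault_unlock_vp(fs);
}

static void
fault_unlock_and_deallocate(struct fault_state *fs)
{
	/* Mirrors vm_fault_unlock_and_deallocate(): unlock, then unwind. */
	printf("unlocking current object\n");
	fault_deallocate(fs);
}

int
main(void)
{
	static int obj, vnode;
	struct fault_state fs = {
		.object = &obj,
		.vp = &vnode,
		.lookup_still_valid = true,
	};

	/* Any failure point in a fault path can bail with one call. */
	fault_unlock_and_deallocate(&fs);
	return (0);
}

The design choice the sketch tries to capture is that the helpers are idempotent (each checks whether its resource is still held), so the listing's many error paths can call the same unwind function without tracking which locks or references are currently outstanding.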