Lines Matching +full:use +full:- +full:guard +full:- +full:pages

1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
10 * Redistribution and use in source and binary forms, with or without
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * Permission to use, copy, modify and distribute this software and
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 * Pittsburgh PA 15213-3890
106 * Maps consist of an ordered doubly-linked list of simple
107 * entries; a self-adjusting binary search tree of these
120 * another, and then marking both regions as copy-on-write.
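The comment fragments above (file lines 106, 107 and 120) summarize the map representation: entries kept in address order, threaded into a self-adjusting binary search tree, with virtual copies made by sharing VM object references and marking both ranges copy-on-write. As rough orientation only, here is a much-simplified sketch of such an entry; the field names are the ones that recur in the matches below, but the struct name and layout are not the real struct vm_map_entry, and the typedefs are the usual kernel ones from <sys/types.h> and <vm/vm.h>:

/* Forward declarations; the real definitions live in the VM headers. */
struct vm_object;
struct vm_map;

struct demo_map_entry {
	vm_offset_t		 start;		/* first address in the range */
	vm_offset_t		 end;		/* last address in the range + 1 */
	struct demo_map_entry	*left;		/* search-tree links; in-order */
	struct demo_map_entry	*right;		/*  traversal gives address order */
	vm_size_t		 max_free;	/* largest free gap in this subtree */
	vm_eflags_t		 eflags;	/* e.g. MAP_ENTRY_NEEDS_COPY */
	union {
		struct vm_object *vm_object;	/* backing memory object */
		struct vm_map	 *sub_map;	/* or a nested submap */
	} object;
	vm_ooffset_t		 offset;	/* starting offset in the object */
	vm_prot_t		 protection;	/* current access rights */
	vm_prot_t		 max_protection;/* ceiling for protection */
};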
148 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
149 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
150 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
160 * VM_MAP_RANGE_CHECK: [ internal use only ]
237 * The worst-case upper bound on the number of kernel map entries that may be
261 * Disable the use of per-CPU buckets: map entry allocation is
268 /* Reserve an extra map entry for use when replenishing the reserve. */
293 map = &vm->vm_map;
296 sx_init(&map->lock, "vm map (user)");
308 KASSERT(vm->vm_map.nentries == 0,
309 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
310 KASSERT(vm->vm_map.size == 0,
311 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
325 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
331 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
332 refcount_init(&vm->vm_refcnt, 1);
333 vm->vm_shm = NULL;
334 vm->vm_swrss = 0;
335 vm->vm_tsize = 0;
336 vm->vm_dsize = 0;
337 vm->vm_ssize = 0;
338 vm->vm_taddr = 0;
339 vm->vm_daddr = 0;
340 vm->vm_maxsaddr = 0;
373 * Delete all of the mappings and pages they hold, then call
376 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
377 vm_map_max(&vm->vm_map));
380 vm->vm_map.pmap = NULL;
391 if (refcount_release(&vm->vm_refcnt))
401 vm = p->p_vmspace;
402 p->p_vmspace = NULL;
415 p = td->td_proc;
416 vm = p->p_vmspace;
426 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
427 if (p->p_vmspace != &vmspace0) {
429 p->p_vmspace = &vmspace0;
433 released = refcount_release(&vm->vm_refcnt);
440 if (p->p_vmspace != vm) {
442 p->p_vmspace = vm;
448 p->p_vmspace = &vmspace0;
467 vm = p->p_vmspace;
468 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
472 if (vm != p->p_vmspace) {
491 * a result, the 'newvm' vmspace always has a non-zero reference
503 KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
506 oldvm = curproc->p_vmspace;
513 curproc->p_vmspace = newvm;
514 refcount_acquire(&newvm->vm_refcnt);
527 mtx_lock_flags_(&map->system_mtx, 0, file, line);
529 sx_xlock_(&map->lock, file, line);
530 map->timestamp++;
540 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
542 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
544 object = entry->object.vm_object;
546 if ((object->flags & OBJ_ANON) != 0)
547 object = object->handle;
549 KASSERT(object->backing_object == NULL,
550 ("non-anon object %p shadows", object));
552 entry, entry->object.vm_object));
574 * Use a different name for this vm_map_entry field when its use
575 * is not consistent with its use as part of an ordered search tree.
587 entry = td->td_map_def_user;
588 td->td_map_def_user = NULL;
590 next = entry->defer_next;
591 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
594 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
599 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
601 object = entry->object.vm_object;
603 vm_pager_release_writecount(object, entry->start,
604 entry->end);
618 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
620 sx_assert_(&map->lock, SA_XLOCKED, file, line);
641 if (map->nupdates > map->nentries) { \
643 map->nupdates = 0; \
662 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
664 map->flags &= ~MAP_REPLENISH;
667 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
669 sx_xunlock_(&map->lock, file, line);
679 mtx_lock_flags_(&map->system_mtx, 0, file, line);
681 sx_slock_(&map->lock, file, line);
689 KASSERT((map->flags & MAP_REPLENISH) == 0,
691 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
693 sx_sunlock_(&map->lock, file, line);
704 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
705 !sx_try_xlock_(&map->lock, file, line);
707 map->timestamp++;
717 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
718 !sx_try_slock_(&map->lock, file, line);
723 * _vm_map_lock_upgrade: [ internal use only ]
727 * non-zero value if the upgrade fails. If the upgrade fails, the map is
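The fragment above (file line 727) notes that the lock-upgrade primitive returns a non-zero value when the shared-to-exclusive upgrade fails, and that the map ends up unlocked in that case. A hedged sketch of the caller pattern that contract implies; the relock-and-relookup details are an illustration, not code from the file:

	if (vm_map_lock_upgrade(map) != 0) {
		/*
		 * The read lock was dropped, so the map may have changed;
		 * take the write lock and look the entry up again.
		 */
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, addr, &entry)) {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	/* Exclusive lock held from here on. */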
738 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
740 if (!sx_try_upgrade_(&map->lock, file, line)) {
741 last_timestamp = map->timestamp;
742 sx_sunlock_(&map->lock, file, line);
748 sx_xlock_(&map->lock, file, line);
749 if (last_timestamp != map->timestamp) {
750 sx_xunlock_(&map->lock, file, line);
755 map->timestamp++;
764 KASSERT((map->flags & MAP_REPLENISH) == 0,
766 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
769 sx_downgrade_(&map->lock, file, line);
776 * Returns a non-zero value if the caller holds a write (exclusive) lock
784 return (mtx_owned(&map->system_mtx));
785 return (sx_xlocked(&map->lock));
809 KASSERT((map->flags & MAP_REPLENISH) == 0,
811 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
813 sx_xunlock_(&map->lock, file, line);
815 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
836 wakeup(&map->root);
844 map->busy++;
852 KASSERT(map->busy, ("vm_map_unbusy: not busy"));
853 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
855 wakeup(&map->busy);
864 while (map->busy) {
867 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
869 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
871 map->timestamp++;
888 map->header.eflags = MAP_ENTRY_HEADER;
889 map->pmap = pmap;
890 map->header.end = min;
891 map->header.start = max;
892 map->flags = 0;
893 map->header.left = map->header.right = &map->header;
894 map->root = NULL;
895 map->timestamp = 0;
896 map->busy = 0;
897 map->anon_loc = 0;
899 map->nupdates = 0;
907 sx_init(&map->lock, "vm map (user)");
915 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF |
920 * vm_map_entry_dispose: [ internal use only ]
931 * vm_map_entry_create: [ internal use only ]
957 kernel_map->flags |= MAP_REPLENISH;
980 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
995 return (root->left != left_ancestor ?
996 root->left->max_free : root->start - left_ancestor->end);
1003 return (root->right != right_ancestor ?
1004 root->right->max_free : right_ancestor->start - root->end);
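These two return statements compute the free gap on either side of an entry: if the entry has a child subtree on that side, the subtree's cached max_free value is used, otherwise the gap is simply the distance to the in-order neighbour. For example (addresses made up): if an entry spans [0x2000, 0x3000), has no left child, and its left ancestor in the tree ends at 0x1000, the left gap is root->start - left_ancestor->end = 0x2000 - 0x1000 = 0x1000 bytes; symmetrically, a right ancestor starting at 0x8000 gives a right gap of 0x8000 - 0x3000 = 0x5000 bytes.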
1019 prior = entry->left;
1020 if (prior->right->start < entry->start) {
1022 prior = prior->right;
1023 while (prior->right != entry);
1040 * Infer root->right->max_free == root->max_free when \
1041 * y->max_free < root->max_free || root->max_free == 0. \
1044 y = root->left; \
1045 max_free = root->max_free; \
1050 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \
1054 z = y->right; \
1056 root->left = z; \
1057 y->right = root; \
1058 if (max_free < y->max_free) \
1059 root->max_free = max_free = \
1060 vm_size_max(max_free, z->max_free); \
1061 } else if (max_free < y->max_free) \
1062 root->max_free = max_free = \
1063 vm_size_max(max_free, root->start - y->end);\
1065 y = root->left; \
1067 /* Copy right->max_free. Put root on rlist. */ \
1068 root->max_free = max_free; \
1071 root->left = rlist; \
1081 * Infer root->left->max_free == root->max_free when \
1082 * y->max_free < root->max_free || root->max_free == 0. \
1085 y = root->right; \
1086 max_free = root->max_free; \
1091 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \
1095 z = y->left; \
1097 root->right = z; \
1098 y->left = root; \
1099 if (max_free < y->max_free) \
1100 root->max_free = max_free = \
1101 vm_size_max(max_free, z->max_free); \
1102 } else if (max_free < y->max_free) \
1103 root->max_free = max_free = \
1104 vm_size_max(max_free, y->start - root->end);\
1106 y = root->right; \
1108 /* Copy left->max_free. Put root on llist. */ \
1109 root->max_free = max_free; \
1112 root->right = llist; \
1120 * subtrees with root->max_free < length as empty trees. llist and rlist are
1121 * the two sides in reverse order (bottom-up), with llist linked by the right
1123 * lists terminated by &map->header. This function, and the subsequent call to
1125 * values in &map->header.
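The fragments above (file lines 1120-1125) describe how vm_map_splay_split() walks down the tree, treating subtrees whose max_free is too small as empty, while accumulating the entries left and right of the search address on two lists, each built bottom-up and linked through the right and left child pointers respectively. Below is a stripped-down sketch of that splitting idea; the max_free pruning, the &map->header sentinel, and the rotation steps are omitted, and struct node and its fields are hypothetical stand-ins for struct vm_map_entry:

struct node {
	uintptr_t	 start, end;
	struct node	*left, *right;
};

/*
 * Descend from root looking for addr.  Entries ending at or below addr are
 * pushed onto *llist (linked through their right pointers); entries starting
 * above addr are pushed onto *rlist (linked through their left pointers).
 * Returns the entry containing addr, or NULL if addr falls in a gap.
 */
static struct node *
splay_split(struct node *root, uintptr_t addr, struct node **llist,
    struct node **rlist)
{
	struct node *next;

	*llist = *rlist = NULL;
	while (root != NULL) {
		if (addr < root->start) {
			next = root->left;
			root->left = *rlist;	/* root lies right of addr */
			*rlist = root;
		} else if (addr >= root->end) {
			next = root->right;
			root->right = *llist;	/* root lies left of addr */
			*llist = root;
		} else
			break;			/* addr is inside root */
		root = next;
	}
	return (root);
}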
1133 left = right = &map->header;
1134 root = map->root;
1135 while (root != NULL && root->max_free >= length) {
1136 KASSERT(left->end <= root->start &&
1137 root->end <= right->start,
1139 if (addr < root->start) {
1141 y->max_free >= length && addr < y->start);
1142 } else if (addr >= root->end) {
1144 y->max_free >= length && addr >= y->end);
1159 hi = root->right == right ? NULL : root->right;
1174 lo = root->left == left ? NULL : root->left;
1204 * llist->max_free and max_free. Update with the
1207 llist->max_free = max_free =
1208 vm_size_max(llist->max_free, max_free);
1209 vm_map_entry_swap(&llist->right, &tail);
1212 root->left = tail;
1225 max_free = root->start - llist->end;
1230 root->left = header;
1231 header->right = root;
1248 root->left == llist ? root : root->left,
1261 * rlist->max_free and max_free. Update with the
1264 rlist->max_free = max_free =
1265 vm_size_max(rlist->max_free, max_free);
1266 vm_map_entry_swap(&rlist->left, &tail);
1269 root->right = tail;
1282 max_free = rlist->start - root->end;
1287 root->right = header;
1288 header->left = root;
1305 root->right == rlist ? root : root->right,
1314 * The Sleator and Tarjan top-down splay algorithm with the
1315 * following variation. Max_free must be computed bottom-up, so
1342 header = &map->header;
1353 llist = root->right;
1362 rlist = root->left;
1369 root->max_free = vm_size_max(max_free_left, max_free_right);
1370 map->root = root;
1392 map->nentries, entry);
1394 map->nentries++;
1395 header = &map->header;
1396 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1404 } else if (entry->start == root->start) {
1411 KASSERT(entry->end < root->end,
1414 if ((root->eflags & MAP_ENTRY_STACK_GAP) == 0)
1415 root->offset += entry->end - root->start;
1416 root->start = entry->end;
1418 max_free_right = root->max_free = vm_size_max(
1428 KASSERT(entry->end == root->end,
1431 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
1432 entry->offset += entry->start - root->start;
1433 root->end = entry->start;
1434 max_free_left = root->max_free = vm_size_max(
1439 entry->max_free = vm_size_max(max_free_left, max_free_right);
1440 map->root = entry;
1457 header = &map->header;
1458 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1465 rlist->start = root->start;
1466 MPASS((rlist->eflags & MAP_ENTRY_STACK_GAP) == 0);
1467 rlist->offset = root->offset;
1471 llist = root->right;
1476 rlist = root->left;
1480 header->left = header->right = header;
1484 root->max_free = vm_size_max(max_free_left, max_free_right);
1485 map->root = root;
1487 map->nentries--;
1489 map->nentries, entry);
1506 header = &map->header;
1507 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1510 entry->end += grow_amount;
1511 root->max_free = vm_size_max(
1514 map->root = root;
1517 __func__, map, map->nentries, entry);
1521 * vm_map_lookup_entry: [ internal use only ]
1543 header = &map->header;
1544 cur = map->root;
1549 if (address >= cur->start && cur->end > address) {
1554 sx_try_upgrade(&map->lock)) {
1564 sx_downgrade(&map->lock);
1572 if (address < cur->start) {
1577 return (address < cur->end);
1585 if (address < cur->start) {
1587 cur = cur->left;
1590 } else if (cur->end <= address) {
1592 cur = cur->right;
1640 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
1655 if (next_entry->start < end)
1691 bdry = pagesizes[bidx] - 1;
1702 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1706 object->cred == NULL,
1708 cred = curthread->td_ucred;
1719 * of the object's pages. (Object granularity
1723 if ((object->flags & OBJ_ANON) != 0) {
1725 if (object->ref_count > 1 || object->shadow_count != 0)
1729 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1732 prev_entry->end == start && (prev_entry->cred == cred ||
1733 (prev_entry->object.vm_object != NULL &&
1734 prev_entry->object.vm_object->cred == cred)) &&
1735 vm_object_coalesce(prev_entry->object.vm_object,
1736 prev_entry->offset,
1737 (vm_size_t)(prev_entry->end - prev_entry->start),
1738 (vm_size_t)(end - prev_entry->end), cred != NULL &&
1745 if (prev_entry->inheritance == inheritance &&
1746 prev_entry->protection == prot &&
1747 prev_entry->max_protection == max &&
1748 prev_entry->wired_count == 0) {
1749 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1752 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1753 map->size += end - prev_entry->end;
1755 end - prev_entry->end);
1767 object = prev_entry->object.vm_object;
1768 offset = prev_entry->offset +
1769 (prev_entry->end - prev_entry->start);
1771 if (cred != NULL && object != NULL && object->cred != NULL &&
1772 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1784 new_entry->start = start;
1785 new_entry->end = end;
1786 new_entry->cred = NULL;
1788 new_entry->eflags = protoeflags;
1789 new_entry->object.vm_object = object;
1790 new_entry->offset = offset;
1792 new_entry->inheritance = inheritance;
1793 new_entry->protection = prot;
1794 new_entry->max_protection = max;
1795 new_entry->wired_count = 0;
1796 new_entry->wiring_thread = NULL;
1797 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1798 new_entry->next_read = start;
1802 new_entry->cred = cred;
1808 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1809 map->size += new_entry->end - new_entry->start;
1822 end - start, cow & MAP_PREFAULT_PARTIAL);
1836 * If object is non-NULL, ref count must be bumped by caller
1864 * vm_map_max(map)-length+1 if insufficient space.
1880 if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1881 return (vm_map_max(map) - length + 1);
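These lines show the failure convention of vm_map_findspace(): when no gap of the requested length exists at or above start, it returns vm_map_max(map) - length + 1, the first start address at which a region of that length could no longer fit. Assuming the variant that returns the address directly (the surrounding caller code is not among the matched lines), a caller can detect failure with a single bounds check:

	*addr = vm_map_findspace(map, start, length);
	if (*addr + length > vm_map_max(map))
		return (KERN_NO_SPACE);		/* sentinel value: no space */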
1884 if (map->root == NULL)
1891 * enough; otherwise set gap_end to start to skip gap-checking and move
1894 header = &map->header;
1896 gap_end = rlist->start;
1898 start = root->end;
1899 if (root->right != rlist)
1905 rlist = root->left;
1910 llist = root->right;
1914 root->max_free = vm_size_max(max_free_left, max_free_right);
1915 map->root = root;
1917 if (length <= gap_end - start)
1921 if (root->right == header || length > root->right->max_free)
1922 return (vm_map_max(map) - length + 1);
1925 * Splay for the least large-enough gap in the right subtree.
1940 llist = root->right;
1943 root->max_free = vm_size_max(max_free_left,
1947 rlist = y->left;
1948 y->max_free = vm_size_max(
1951 root->max_free = vm_size_max(max_free_left, y->max_free);
1953 map->root = root;
1955 return (root->end);
1968 ("vm_map_fixed: non-NULL backing object for stack"));
2025 * specified alignment. Performs an address-ordered, first-fit search from
2028 * given (object, offset) pair so as to enable the greatest possible use of
2104 * first-fit from the specified address; the region found is
2107 * If object is non-NULL, ref count must be bumped by caller
2136 ("non-NULL backing object for stack"));
2140 (object->flags & OBJ_COLORED) == 0))
2147 en_aslr = (map->flags & MAP_ASLR) != 0;
2149 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2156 (map->flags & MAP_ASLR_IGNSTART) != 0)
2160 curr_min_addr = map->anon_loc;
2178 * perform a first-fit search of the available address
2195 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2219 pidx--;
2279 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 ||
2280 *addr < map->anon_loc))
2281 map->anon_loc = *addr;
2332 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2333 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2336 return (prev->end == entry->start &&
2337 prev->object.vm_object == entry->object.vm_object &&
2338 (prev->object.vm_object == NULL ||
2339 prev->offset + (prev->end - prev->start) == entry->offset) &&
2340 prev->eflags == entry->eflags &&
2341 prev->protection == entry->protection &&
2342 prev->max_protection == entry->max_protection &&
2343 prev->inheritance == entry->inheritance &&
2344 prev->wired_count == entry->wired_count &&
2345 prev->cred == entry->cred);
2356 * kept without causing a lock-order reversal with the vnode lock.
2359 * object->un_pager.vnp.writemappings, the writemappings value
2362 if (entry->object.vm_object != NULL)
2363 vm_object_deallocate(entry->object.vm_object);
2364 if (entry->cred != NULL)
2365 crfree(entry->cred);
2385 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2404 KASSERT(entry->object.vm_object == NULL,
2406 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2408 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2409 entry->cred, entry->end - entry->start);
2410 entry->object.vm_object = object;
2411 entry->offset = 0;
2412 entry->cred = NULL;
2426 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2428 if (entry->object.vm_object == NULL && !vm_map_is_system(map) &&
2429 (entry->eflags & MAP_ENTRY_GUARD) == 0)
2431 else if (entry->object.vm_object != NULL &&
2432 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2433 entry->cred != NULL) {
2434 VM_OBJECT_WLOCK(entry->object.vm_object);
2435 KASSERT(entry->object.vm_object->cred == NULL,
2437 entry->object.vm_object->cred = entry->cred;
2438 entry->object.vm_object->charge = entry->end - entry->start;
2439 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2440 entry->cred = NULL;
2465 if (new_entry->cred != NULL)
2466 crhold(entry->cred);
2467 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2468 vm_object_reference(new_entry->object.vm_object);
2471 * The object->un_pager.vnp.writemappings for the object of
2473 * virtual pages are re-distributed among the clipped entries,
2481 * vm_map_clip_start: [ internal use only ]
2498 if (startaddr <= entry->start)
2502 KASSERT(entry->end > startaddr && entry->start < startaddr,
2507 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
2517 new_entry->end = startaddr;
2554 * vm_map_clip_end: [ internal use only ]
2571 if (endaddr >= entry->end)
2575 KASSERT(entry->start < endaddr && entry->end > endaddr,
2580 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
2590 new_entry->start = endaddr;
2597 * vm_map_submap: [ kernel use only ]
2627 submap->flags |= MAP_IS_SUB_MAP;
2632 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2633 (entry->eflags & MAP_ENTRY_COW) == 0 &&
2634 entry->object.vm_object == NULL) {
2641 entry->object.sub_map = submap;
2642 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2650 submap->flags &= ~MAP_IS_SUB_MAP;
2657 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2665 * object's memory-resident pages. No further physical pages are
2666 * allocated, and no further virtual pages are retrieved from secondary
2668 * limited number of page mappings are created at the low-end of the
2670 * counts as one page mapping.) Otherwise, all resident pages within
2684 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2686 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2687 pmap_object_init_pt(map->pmap, addr, object, pindex,
2697 if (psize + pindex > object->size) {
2698 if (pindex >= object->size) {
2702 psize = object->size - pindex;
2716 p != NULL && (tmpidx = p->pindex - pindex) < psize;
2720 * free pages allocating pv entries.
2735 for (psind = p->psind; psind > 0; psind--) {
2737 (pagesizes[psind] - 1)) == 0) {
2738 mask = atop(pagesizes[psind]) - 1;
2749 pmap_enter_object(map->pmap, start, addr +
2755 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2766 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
2767 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
2770 old_prot = PROT_EXTRACT(entry->offset);
2772 entry->offset = PROT_MAX(new_maxprot) |
2776 entry->offset = new_prot | PROT_MAX(
2777 PROT_MAX_EXTRACT(entry->offset));
2812 if ((map->flags & MAP_WXORX) != 0 &&
2820 * Ensure that we are not concurrently wiring pages. vm_map_wire() may
2821 * need to fault pages into the map and will drop the map lock while
2833 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
2839 * guard).
2841 while (!CONTAINS_BITS(first_entry->eflags,
2845 start = first_entry->start;
2856 for (entry = first_entry; entry->start < end;
2858 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2862 if ((entry->eflags & (MAP_ENTRY_GUARD |
2865 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ?
2866 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection;
2871 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2876 * Postpone the operation until all in-transition map entries have
2877 * stabilized. An in-transition entry might already have its pages
2883 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2890 * private (i.e., copy-on-write) mappings that are transitioning from
2891 * read-only to read/write access. If a reservation fails, break out
2900 for (entry = first_entry; entry->start < end;
2909 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2911 (entry->eflags & MAP_ENTRY_GUARD) != 0)
2914 cred = curthread->td_ucred;
2915 obj = entry->object.vm_object;
2918 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2919 if (!swap_reserve(entry->end - entry->start)) {
2921 end = entry->end;
2925 entry->cred = cred;
2930 if ((obj->flags & OBJ_SWAP) == 0) {
2937 * we cannot distinguish between non-charged and
2940 KASSERT(obj->charge == 0,
2943 if (!swap_reserve(ptoa(obj->size))) {
2946 end = entry->end;
2951 obj->cred = cred;
2952 obj->charge = ptoa(obj->size);
2962 entry->start < end;
2968 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
2974 old_prot = entry->protection;
2977 entry->max_protection = new_maxprot;
2978 entry->protection = new_maxprot & old_prot;
2981 entry->protection = new_prot;
2986 * undesirable. Instead, immediately copy any pages that are
2987 * copy-on-write and enable write access in the physical map.
2989 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2990 (entry->protection & VM_PROT_WRITE) != 0 &&
2996 * about copy-on-write here.
2998 if ((old_prot & ~entry->protection) != 0) {
2999 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
3001 pmap_protect(map->pmap, entry->start,
3002 entry->end,
3003 entry->protection & MASK(entry));
3033 * we need to use an exclusive lock on the map and we need to perform
3034 * various clipping operations. Otherwise we only need a read-lock
3080 for (; entry->start < end; prev_entry = entry,
3082 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
3105 entry->eflags |= MAP_ENTRY_NOSYNC;
3108 entry->eflags &= ~MAP_ENTRY_NOSYNC;
3111 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
3114 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
3135 for (; entry->start < end;
3139 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP |
3146 * we hold the VM map read-locked, neither the
3151 entry->object.vm_object != NULL &&
3152 entry->object.vm_object->backing_object != NULL)
3155 pstart = OFF_TO_IDX(entry->offset);
3156 pend = pstart + atop(entry->end - entry->start);
3157 useStart = entry->start;
3158 useEnd = entry->end;
3160 if (entry->start < start) {
3161 pstart += atop(start - entry->start);
3164 if (entry->end > end) {
3165 pend -= atop(entry->end - end);
3183 pmap_advise(map->pmap, useStart, useEnd,
3186 vm_object_madvise(entry->object.vm_object, pstart,
3190 * Pre-populate paging structures in the
3195 entry->wired_count == 0) {
3198 entry->protection,
3199 entry->object.vm_object,
3201 ptoa(pend - pstart),
3242 if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3248 for (entry = start_entry; entry->start < end;
3250 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3257 for (entry = start_entry; entry->start < end; prev_entry = entry,
3259 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3260 entry, (uintmax_t)entry->end, (uintmax_t)end));
3261 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3263 entry->inheritance = new_inheritance;
3277 * another held the lock, look up a possibly-changed entry at or after the
3289 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3290 ("not in-tranition map entry %p", in_entry));
3294 start = MAX(in_start, in_entry->start);
3295 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3296 last_timestamp = map->timestamp;
3303 if (last_timestamp + 1 == map->timestamp)
3349 for (entry = first_entry; entry->start < end; entry = next_entry) {
3350 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3379 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3380 entry->wiring_thread == NULL,
3382 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3383 entry->wiring_thread = curthread;
3390 entry->end < end && next_entry->start > entry->end) {
3391 end = entry->end;
3400 end = entry->end;
3415 for (; entry->start < end;
3426 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3427 entry->wiring_thread != curthread) {
3434 (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3435 if (entry->wired_count == 1)
3438 entry->wired_count--;
3440 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3442 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3443 ("vm_map_unwire: in-transition flag missing %p", entry));
3444 KASSERT(entry->wiring_thread == curthread,
3446 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3447 entry->wiring_thread = NULL;
3448 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3449 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3496 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3497 entry->wired_count == 1,
3499 KASSERT(failed_addr < entry->end,
3503 * If any pages at the start of this entry were successfully wired,
3506 if (failed_addr > entry->start) {
3507 pmap_unwire(map->pmap, entry->start, failed_addr);
3508 vm_object_unwire(entry->object.vm_object, entry->offset,
3509 failed_addr - entry->start, PQ_ACTIVE);
3513 * Assign an out-of-range value to represent the failure to wire this
3516 entry->wired_count = -1;
3563 for (entry = first_entry; entry->start < end; entry = next_entry) {
3564 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3591 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3592 entry->wiring_thread == NULL,
3594 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3595 entry->wiring_thread = curthread;
3596 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3597 || (entry->protection & prot) != prot) {
3598 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3600 end = entry->end;
3604 } else if (entry->wired_count == 0) {
3605 entry->wired_count++;
3607 npages = atop(entry->end - entry->start);
3610 entry->start);
3611 end = entry->end;
3617 * Release the map lock, relying on the in-transition
3620 saved_start = entry->start;
3621 saved_end = entry->end;
3622 last_timestamp = map->timestamp;
3641 if (last_timestamp + 1 != map->timestamp) {
3654 for (entry = next_entry; entry->end < saved_end;
3663 faddr < entry->end)
3672 end = entry->end;
3676 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3677 entry->wired_count++;
3685 entry->end < end && next_entry->start > entry->end) {
3686 end = entry->end;
3703 for (; entry->start < end;
3709 * pages or draining MAP_ENTRY_IN_TRANSITION.
3718 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3719 entry->wiring_thread != curthread) {
3725 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3729 entry->eflags |= MAP_ENTRY_USER_WIRED;
3730 } else if (entry->wired_count == -1) {
3735 entry->wired_count = 0;
3737 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3742 if (entry->wired_count == 1) {
3746 atop(entry->end - entry->start));
3748 entry->wired_count--;
3750 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3751 ("vm_map_wire: in-transition flag missing %p", entry));
3752 KASSERT(entry->wiring_thread == curthread,
3754 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3756 entry->wiring_thread = NULL;
3757 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3758 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3772 * Push any dirty cached pages in the address range to their pager.
3773 * If syncio is TRUE, dirty pages are written synchronously.
3774 * If invalidate is TRUE, any cached pages are freed as well.
3777 * supposed to flush all modified pages within the region containing
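The fragments above (file lines 3772-3777) describe vm_map_sync(): dirty pages in the range are pushed to their pager, synchronously when syncio is true, and cached pages are additionally freed when invalidate is true. From userspace this path is reached through msync(2), whose MS_SYNC and MS_INVALIDATE flags correspond to those two booleans; a minimal, self-contained example (the file name and length are made up for illustration):

#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	char *p;
	int fd;

	fd = open("/tmp/example.dat", O_RDWR);	/* hypothetical file */
	if (fd < 0)
		return (1);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	memset(p, 'x', len);		/* dirty the shared mapping */

	/* Synchronous writeback: vm_map_sync() with syncio true. */
	msync(p, len, MS_SYNC);

	/* Writeback and drop cached pages: invalidate true as well. */
	msync(p, len, MS_SYNC | MS_INVALIDATE);

	munmap(p, len);
	close(fd);
	return (0);
}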
3807 start = first_entry->start;
3808 end = first_entry->end;
3812 * Make a first pass to check for user-wired memory, holes,
3815 for (entry = first_entry; entry->start < end; entry = next_entry) {
3817 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3823 ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
3824 (end & (pagesizes[bdry_idx] - 1)) != 0)) {
3830 if (end > entry->end &&
3831 entry->end != next_entry->start) {
3838 pmap_remove(map->pmap, start, end);
3842 * Make a second pass, cleaning/uncaching pages from the indicated
3845 for (entry = first_entry; entry->start < end;) {
3846 offset = entry->offset + (start - entry->start);
3847 size = (end <= entry->end ? end : entry->end) - start;
3848 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3853 smap = entry->object.sub_map;
3856 tsize = tentry->end - offset;
3859 object = tentry->object.vm_object;
3860 offset = tentry->offset + (offset - tentry->start);
3863 object = entry->object.vm_object;
3866 last_timestamp = map->timestamp;
3873 if (last_timestamp == map->timestamp ||
3883 * vm_map_entry_unwire: [ internal use only ]
3896 KASSERT(entry->wired_count > 0,
3899 size = entry->end - entry->start;
3900 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3902 pmap_unwire(map->pmap, entry->start, entry->end);
3903 vm_object_unwire(entry->object.vm_object, entry->offset, size,
3905 entry->wired_count = 0;
3912 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3913 vm_object_deallocate(entry->object.vm_object);
3918 * vm_map_entry_delete: [ internal use only ]
3930 object = entry->object.vm_object;
3932 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3933 MPASS(entry->cred == NULL);
3934 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3940 size = entry->end - entry->start;
3941 map->size -= size;
3943 if (entry->cred != NULL) {
3944 swap_release_by_cred(size, entry->cred);
3945 crfree(entry->cred);
3948 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3949 entry->object.vm_object = NULL;
3950 } else if ((object->flags & OBJ_ANON) != 0 ||
3952 KASSERT(entry->cred == NULL || object->cred == NULL ||
3953 (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3955 offidxstart = OFF_TO_IDX(entry->offset);
3958 if (object->ref_count != 1 &&
3959 ((object->flags & OBJ_ONEMAPPING) != 0 ||
3967 * of pages.
3971 if (offidxend >= object->size &&
3972 offidxstart < object->size) {
3973 size1 = object->size;
3974 object->size = offidxstart;
3975 if (object->cred != NULL) {
3976 size1 -= object->size;
3977 KASSERT(object->charge >= ptoa(size1),
3980 object->cred);
3981 object->charge -= ptoa(size1);
3990 entry->defer_next = curthread->td_map_def_user;
3991 curthread->td_map_def_user = entry;
3996 * vm_map_delete: [ internal use only ]
4019 for (; entry->start < end; entry = next_entry) {
4025 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
4031 saved_start = entry->start;
4032 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4033 last_timestamp = map->timestamp;
4036 if (last_timestamp + 1 != map->timestamp) {
4062 if (entry->wired_count != 0)
4066 * Remove mappings for the pages, but only if the
4068 * make sense to call pmap_remove() for guard entries.
4070 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
4071 entry->object.vm_object != NULL)
4072 pmap_map_delete(map->pmap, entry->start, entry->end);
4076 * entries pointing to its pages. (Otherwise, its
4132 if (start < entry->start)
4137 if ((entry->protection & protection) != protection)
4140 start = entry->end;
4150 * Copies a swap-backed object from an existing map entry to a
4162 src_object = src_entry->object.vm_object;
4164 if ((src_object->flags & OBJ_ANON) != 0) {
4167 if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
4169 src_object = src_entry->object.vm_object;
4176 if (src_entry->cred != NULL &&
4177 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4178 KASSERT(src_object->cred == NULL,
4181 src_object->cred = src_entry->cred;
4182 src_object->charge = size;
4184 dst_entry->object.vm_object = src_object;
4186 cred = curthread->td_ucred;
4188 dst_entry->cred = cred;
4190 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4192 src_entry->cred = cred;
4218 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
4221 if (src_entry->wired_count == 0 ||
4222 (src_entry->protection & VM_PROT_WRITE) == 0) {
4225 * write-protected.
4227 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
4228 (src_entry->protection & VM_PROT_WRITE) != 0) {
4229 pmap_protect(src_map->pmap,
4230 src_entry->start,
4231 src_entry->end,
4232 src_entry->protection & ~VM_PROT_WRITE);
4238 size = src_entry->end - src_entry->start;
4239 if ((src_object = src_entry->object.vm_object) != NULL) {
4240 if ((src_object->flags & OBJ_SWAP) != 0) {
4244 src_object = src_entry->object.vm_object;
4247 dst_entry->object.vm_object = src_object;
4249 src_entry->eflags |= MAP_ENTRY_COW |
4251 dst_entry->eflags |= MAP_ENTRY_COW |
4253 dst_entry->offset = src_entry->offset;
4254 if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
4261 * decrement object->un_pager writecount
4266 fake_entry->eflags = MAP_ENTRY_WRITECNT;
4267 src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
4269 fake_entry->object.vm_object = src_object;
4270 fake_entry->start = src_entry->start;
4271 fake_entry->end = src_entry->end;
4272 fake_entry->defer_next =
4273 curthread->td_map_def_user;
4274 curthread->td_map_def_user = fake_entry;
4277 pmap_copy(dst_map->pmap, src_map->pmap,
4278 dst_entry->start, dst_entry->end - dst_entry->start,
4279 src_entry->start);
4281 dst_entry->object.vm_object = NULL;
4282 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0)
4283 dst_entry->offset = 0;
4284 if (src_entry->cred != NULL) {
4285 dst_entry->cred = curthread->td_ucred;
4286 crhold(dst_entry->cred);
4292 * We don't want to make writeable wired pages copy-on-write.
4293 * Immediately copy these pages into the new map by simulating
4294 * page faults. The new pages are pageable.
4303 * Update the newly-forked vmspace each time a map entry is inherited
4305 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4314 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4316 entrysize = entry->end - entry->start;
4317 vm2->vm_map.size += entrysize;
4318 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
4319 vm2->vm_ssize += btoc(entrysize);
4320 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4321 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4322 newend = MIN(entry->end,
4323 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4324 vm2->vm_dsize += btoc(newend - entry->start);
4325 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4326 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4327 newend = MIN(entry->end,
4328 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4329 vm2->vm_tsize += btoc(newend - entry->start);
4354 old_map = &vm1->vm_map;
4361 vm2->vm_taddr = vm1->vm_taddr;
4362 vm2->vm_daddr = vm1->vm_daddr;
4363 vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4364 vm2->vm_stacktop = vm1->vm_stacktop;
4365 vm2->vm_shp_base = vm1->vm_shp_base;
4367 if (old_map->busy)
4369 new_map = &vm2->vm_map;
4373 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4375 sx_xunlock(&old_map->lock);
4376 sx_xunlock(&new_map->lock);
4382 new_map->anon_loc = old_map->anon_loc;
4383 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART |
4387 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4390 inh = old_entry->inheritance;
4391 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4404 object = old_entry->object.vm_object;
4407 object = old_entry->object.vm_object;
4415 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4416 vm_object_shadow(&old_entry->object.vm_object,
4417 &old_entry->offset,
4418 old_entry->end - old_entry->start,
4419 old_entry->cred,
4422 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4423 old_entry->cred = NULL;
4431 object = old_entry->object.vm_object;
4435 if (old_entry->cred != NULL) {
4436 KASSERT(object->cred == NULL,
4438 object->cred = old_entry->cred;
4439 object->charge = old_entry->end -
4440 old_entry->start;
4441 old_entry->cred = NULL;
4450 if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4451 object->type == OBJT_VNODE) {
4452 KASSERT(((struct vnode *)object->
4453 handle)->v_writecount > 0,
4456 KASSERT(object->un_pager.vnp.
4469 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4471 new_entry->wiring_thread = NULL;
4472 new_entry->wired_count = 0;
4473 if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4475 new_entry->start, new_entry->end);
4480 * Insert the entry into the new map -- we know we're
4489 pmap_copy(new_map->pmap, old_map->pmap,
4490 new_entry->start,
4491 (old_entry->end - old_entry->start),
4492 old_entry->start);
4504 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4506 new_entry->wiring_thread = NULL;
4507 new_entry->wired_count = 0;
4508 new_entry->object.vm_object = NULL;
4509 new_entry->cred = NULL;
4525 new_entry->start = old_entry->start;
4526 new_entry->end = old_entry->end;
4527 new_entry->eflags = old_entry->eflags &
4531 new_entry->protection = old_entry->protection;
4532 new_entry->max_protection = old_entry->max_protection;
4533 new_entry->inheritance = VM_INHERIT_ZERO;
4538 new_entry->cred = curthread->td_ucred;
4539 crhold(new_entry->cred);
4540 *fork_charge += (new_entry->end - new_entry->start);
4546 * Use inlined vm_map_unlock() to postpone handling the deferred
4550 sx_xunlock(&old_map->lock);
4551 sx_xunlock(&new_map->lock);
4569 MPASS((map->flags & MAP_WIREFUTURE) == 0);
4575 if (map->size + init_ssize > vmemlim) {
4589 "Specifies the number of guard pages for a stack that grows");
4606 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4607 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4614 init_ssize = max_ssize - sgp;
4623 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4634 bot = addrbos + max_ssize - init_ssize;
4642 KASSERT(new_entry->end == top || new_entry->start == bot,
4644 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4652 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0,
4653 ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
4654 KASSERT((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0,
4656 gap_entry->eflags));
4660 * read-ahead logic is never used for it. Re-use
4667 gap_entry->next_read = sgp;
4668 gap_entry->offset = prot | PROT_MAX(max);
4686 vm_size_t grow_amount, guard, max_grow, sgp;
4699 vm = p->p_vmspace;
4706 if (p != initproc && (map != &p->p_vmspace->vm_map ||
4707 p->p_textvp == NULL))
4719 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4721 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0) {
4723 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4724 stack_entry->start != gap_entry->end)
4726 grow_amount = round_page(stack_entry->start - addr);
4730 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4731 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4732 gap_entry->next_read;
4733 max_grow = gap_entry->end - gap_entry->start;
4734 if (guard > max_grow)
4736 max_grow -= guard;
4744 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4745 addr < (vm_offset_t)vm->vm_stacktop;
4746 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4753 ctob(vm->vm_ssize) + grow_amount)) {
4764 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4765 grow_amount = trunc_page((vm_size_t)stacklim) -
4766 ctob(vm->vm_ssize);
4773 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4774 grow_amount = limit - ctob(vm->vm_ssize);
4777 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4778 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4786 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4797 if (map->size + grow_amount > vmemlim) {
4804 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4823 prot = PROT_EXTRACT(gap_entry->offset);
4824 max = PROT_MAX_EXTRACT(gap_entry->offset);
4825 sgp = gap_entry->next_read;
4827 grow_start = gap_entry->end - grow_amount;
4828 if (gap_entry->start + grow_amount == gap_entry->end) {
4829 gap_start = gap_entry->start;
4830 gap_end = gap_entry->end;
4834 MPASS(gap_entry->start < gap_entry->end - grow_amount);
4835 vm_map_entry_resize(map, gap_entry, -grow_amount);
4847 gap_entry->next_read = sgp;
4848 gap_entry->offset = prot | PROT_MAX(max);
4855 vm->vm_ssize += btoc(grow_amount);
4860 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4871 error = racct_set(p, RACCT_VMEM, map->size);
4875 ptoa(pmap_wired_count(map->pmap)));
4878 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4894 struct vmspace *oldvmspace = p->p_vmspace;
4897 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4902 newvmspace->vm_swrss = oldvmspace->vm_swrss;
4911 p->p_vmspace = newvmspace;
4913 if (p == curthread->td_proc)
4915 curthread->td_pflags |= TDP_EXECVMSPC;
4926 struct vmspace *oldvmspace = p->p_vmspace;
4932 * cannot concurrently transition 1 -> 2.
4934 if (refcount_load(&oldvmspace->vm_refcnt) == 1)
4940 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4945 p->p_vmspace = newvmspace;
4947 if (p == curthread->td_proc)
4967 * A handle (out_entry) is returned for use in
5011 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5014 *var_map = map = entry->object.sub_map;
5022 prot = entry->protection;
5026 (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
5027 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 &&
5036 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
5039 ("entry %p flags %x", entry, entry->eflags));
5041 (entry->max_protection & VM_PROT_WRITE) == 0 &&
5042 (entry->eflags & MAP_ENTRY_COW) == 0) {
5051 *wired = (entry->wired_count != 0);
5053 fault_type = entry->protection;
5054 size = entry->end - entry->start;
5057 * If the entry was copy-on-write, we either ...
5059 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5072 * -- one just moved from the map to the new
5078 if (entry->cred == NULL) {
5083 cred = curthread->td_ucred;
5090 entry->cred = cred;
5092 eobject = entry->object.vm_object;
5093 vm_object_shadow(&entry->object.vm_object,
5094 &entry->offset, size, entry->cred, false);
5095 if (eobject == entry->object.vm_object) {
5099 swap_release_by_cred(size, entry->cred);
5100 crfree(entry->cred);
5102 entry->cred = NULL;
5103 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
5108 * We're attempting to read a copy-on-write page --
5118 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) {
5121 entry->object.vm_object = vm_object_allocate_anon(atop(size),
5122 NULL, entry->cred, size);
5123 entry->offset = 0;
5124 entry->cred = NULL;
5130 * copy-on-write or empty, it has been fixed up.
5132 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5133 *object = entry->object.vm_object;
5171 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5177 prot = entry->protection;
5186 *wired = (entry->wired_count != 0);
5188 fault_type = entry->protection;
5190 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5192 * Fail if the entry was copy-on-write for a write fault.
5197 * We're attempting to read a copy-on-write page --
5206 if (entry->object.vm_object == NULL && !vm_map_is_system(map))
5211 * copy-on-write or empty, it has been fixed up.
5213 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5214 *object = entry->object.vm_object;
5230 * Unlock the main-level map
5253 return (map->pmap);
5272 ++map->nupdates;
5277 header = prev = &map->header;
5279 KASSERT(prev->end <= entry->start,
5280 ("map %p prev->end = %jx, start = %jx", map,
5281 (uintmax_t)prev->end, (uintmax_t)entry->start));
5282 KASSERT(entry->start < entry->end,
5284 (uintmax_t)entry->start, (uintmax_t)entry->end));
5285 KASSERT(entry->left == header ||
5286 entry->left->start < entry->start,
5287 ("map %p left->start = %jx, start = %jx", map,
5288 (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5289 KASSERT(entry->right == header ||
5290 entry->start < entry->right->start,
5291 ("map %p start = %jx, right->start = %jx", map,
5292 (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5293 cur = map->root;
5296 if (entry->start < cur->start) {
5298 cur = cur->left;
5301 map, (uintmax_t)entry->start));
5302 } else if (cur->end <= entry->start) {
5304 cur = cur->right;
5307 map, (uintmax_t)entry->start));
5311 map, (uintmax_t)entry->start));
5317 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5319 (uintmax_t)entry->max_free,
5323 KASSERT(prev->end <= entry->start,
5324 ("map %p prev->end = %jx, start = %jx", map,
5325 (uintmax_t)prev->end, (uintmax_t)entry->start));
5342 (void *)map->pmap, map->nentries, map->timestamp);
5345 prev = &map->header;
5348 (void *)entry, (void *)entry->start, (void *)entry->end,
5349 entry->eflags);
5355 entry->protection,
5356 entry->max_protection,
5358 entry->inheritance]);
5359 if (entry->wired_count != 0)
5362 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5364 (void *)entry->object.sub_map,
5365 (uintmax_t)entry->offset);
5366 if (prev == &map->header ||
5367 prev->object.sub_map !=
5368 entry->object.sub_map) {
5370 vm_map_print((vm_map_t)entry->object.sub_map);
5371 db_indent -= 2;
5374 if (entry->cred != NULL)
5375 db_printf(", ruid %d", entry->cred->cr_ruid);
5377 (void *)entry->object.vm_object,
5378 (uintmax_t)entry->offset);
5379 if (entry->object.vm_object && entry->object.vm_object->cred)
5381 entry->object.vm_object->cred->cr_ruid,
5382 (uintmax_t)entry->object.vm_object->charge);
5383 if (entry->eflags & MAP_ENTRY_COW)
5385 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5388 if (prev == &map->header ||
5389 prev->object.vm_object !=
5390 entry->object.vm_object) {
5393 entry->object.vm_object,
5395 db_indent -= 2;
5400 db_indent -= 2;
5424 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5425 (void *)vmspace_pmap(p->p_vmspace));
5427 vm_map_print((vm_map_t)&p->p_vmspace->vm_map);