Lines matching refs:map in uvm_map.c
81 * uvm_map.c: uvm map operations
212 #define UVM_ASSERT(map, cond, file, line) \
213 uvm_tree_assert((map), (cond), #cond, (file), (line))
224 * The kernel map will initially be VM_MAP_KSIZE_INIT bytes.
268 * pool for dynamically-allocated map entries.
295 #define vm_map_modflags(map, set, clear) \
297 mtx_enter(&(map)->flags_lock); \
298 (map)->flags = ((map)->flags | (set)) & ~(clear); \
299 mtx_leave(&(map)->flags_lock); \
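A minimal usage sketch (wire_future is a hypothetical caller flag), patterned on the uvm_map_pageable_all() lines further down, which toggle VM_MAP_WIREFUTURE while holding the map lock:

	vm_map_lock(map);
	if (wire_future)
		vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	/* set the flag */
	else
		vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	/* clear the flag */
	vm_map_unlock(map);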
341 uvm_mapent_free_insert(struct vm_map *map, struct uvm_addr_state *uaddr,
358 bound = uvm_map_boundary(map, min, max);
359 KASSERT(uvm_map_uaddr(map, min) == uaddr);
366 UVM_MAP_REQ_WRITE(map);
373 (*fun->uaddr_free_insert)(map, uaddr, entry);
385 uvm_mapent_free_remove(struct vm_map *map, struct uvm_addr_state *uaddr,
391 KASSERT(uvm_map_uaddr_e(map, entry) == uaddr);
392 UVM_MAP_REQ_WRITE(map);
397 (*fun->uaddr_free_remove)(map, uaddr, entry);
406 uvm_mapent_addr_insert(struct vm_map *map, struct vm_map_entry *entry)
419 UVM_MAP_REQ_WRITE(map);
420 res = RBT_INSERT(uvm_map_addr, &map->addr, entry);
422 panic("uvm_mapent_addr_insert: map %p entry %p "
425 map, entry,
435 uvm_mapent_addr_remove(struct vm_map *map, struct vm_map_entry *entry)
442 UVM_MAP_REQ_WRITE(map);
443 res = RBT_REMOVE(uvm_map_addr, &map->addr, entry);
450 * uvm_map_reference: add reference to a map
452 * => map need not be locked
455 uvm_map_reference(struct vm_map *map)
457 atomic_inc_int(&map->ref_count);
486 uvmspace_dused(struct vm_map *map, vaddr_t min, vaddr_t max)
493 KASSERT(map->flags & VM_MAP_ISVMSPACE);
494 vm_map_assert_anylock(map);
496 vm = (struct vmspace *)map;
543 * may not be linked in a map.
564 uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
575 vm_map_assert_anylock(map);
580 if ((map->flags & VM_MAP_ISVMSPACE) == 0) {
585 atree = &map->addr;
636 free = uvm_map_uaddr_e(map, i);
639 (free == map->uaddr_exe ||
640 free == map->uaddr_brk_stack))
653 uvm_map_findspace(struct vm_map *map, struct vm_map_entry**first,
664 for (i = 0; i < nitems(map->uaddr_any); i++) {
665 uaddr = map->uaddr_any[i];
667 if (uvm_addr_invoke(map, uaddr, first, last,
673 uaddr = map->uaddr_brk_stack;
674 if (uvm_addr_invoke(map, uaddr, first, last,
722 * uvm_mapanon: establish a valid mapping in map for an anon
726 * => map must be unlocked.
732 uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
745 KASSERT((map->flags & VM_MAP_ISVMSPACE) == VM_MAP_ISVMSPACE);
746 KASSERT(map != kernel_map);
747 KASSERT((map->flags & UVM_FLAG_HOLE) == 0);
748 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
778 * Before grabbing the lock, allocate a map entry for later
782 new = uvm_mapent_alloc(map, flags);
786 vm_map_lock(map);
800 !uvm_map_is_stack_remappable(map, *addr, sz,
805 if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
812 if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
818 uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
825 } else if ((prot & PROT_EXEC) != 0 && map->uaddr_exe != NULL) {
827 error = uvm_addr_invoke(map, map->uaddr_exe, &first, &last,
834 uvm_map_vmspace_update(map, &dead, flags);
836 error = uvm_map_findspace(map, &first, &last, addr, sz,
859 entry = uvm_map_mkentry(map, first, last, *addr, sz, flags, &dead,
877 map->sserial++;
891 /* Update map and process statistics. */
892 map->size += sz;
894 ((struct vmspace *)map)->vm_dused +=
895 uvmspace_dused(map, *addr, *addr + sz);
899 vm_map_unlock(map);
916 * uvm_map: establish a valid mapping in map
919 * => map must be unlocked.
934 uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
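A hedged caller sketch; the trailing parameters (backing uvm_object, offset, alignment, UVM_MAPFLAG() bits) are assumed from uvm_extern.h and do not appear in the matches above. The map is passed unlocked, and a kernel_map caller must avoid the write+exec combination that the panic a few lines down rejects:

	/* Sketch only: map one page of anonymous memory into kernel_map. */
	vaddr_t va = 0;
	vsize_t sz = PAGE_SIZE;
	int error;

	error = uvm_map(kernel_map, &va, sz, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_NORMAL, 0));
	if (error)
		return (error);
	/* va now holds the kernel virtual address chosen by the allocator. */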
948 if ((map->flags & VM_MAP_INTRSAFE) == 0)
995 if (!(map->flags & VM_MAP_ISVMSPACE) && !(flags & UVM_FLAG_FIXED))
1002 if (map == kernel_map &&
1004 panic("uvm_map: kernel map W^X violation requested");
1007 * Before grabbing the lock, allocate a map entry for later
1011 new = uvm_mapent_alloc(map, flags);
1016 if (vm_map_lock_try(map) == FALSE) {
1021 vm_map_lock(map);
1038 if ((map->flags & VM_MAP_ISVMSPACE) == 0 &&
1040 uvm_map_kmem_grow(map, &dead,
1046 if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
1052 if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
1057 (map->flags & VM_MAP_ISVMSPACE) == VM_MAP_ISVMSPACE &&
1059 uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
1066 } else if ((prot & PROT_EXEC) != 0 && map->uaddr_exe != NULL) {
1068 error = uvm_addr_invoke(map, map->uaddr_exe, &first, &last,
1072 if (error != 0 && (map->flags & VM_MAP_ISVMSPACE) == 0) {
1073 uvm_map_kmem_grow(map, &dead, sz, flags);
1075 error = uvm_addr_invoke(map, map->uaddr_exe,
1084 if (map->flags & VM_MAP_ISVMSPACE)
1085 uvm_map_vmspace_update(map, &dead, flags);
1087 error = uvm_map_findspace(map, &first, &last, addr, sz,
1091 if (error != 0 && (map->flags & VM_MAP_ISVMSPACE) == 0) {
1092 uvm_map_kmem_grow(map, &dead, sz, flags);
1094 error = uvm_map_findspace(map, &first, &last, addr, sz,
1108 KASSERT((map->flags & VM_MAP_ISVMSPACE) == VM_MAP_ISVMSPACE ||
1128 entry = uvm_map_mkentry(map, first, last, *addr, sz, flags, &dead,
1146 map->sserial++;
1168 /* Update map and process statistics. */
1170 map->size += sz;
1171 if ((map->flags & VM_MAP_ISVMSPACE) && uobj == NULL &&
1173 ((struct vmspace *)map)->vm_dused +=
1174 uvmspace_dused(map, *addr, *addr + sz);
1186 (map->flags & VM_MAP_ISVMSPACE) == 0)
1187 uvm_mapent_tryjoin(map, entry, &dead);
1190 vm_map_unlock(map);
1199 if (map->flags & VM_MAP_INTRSAFE)
1213 uvm_mapent_isjoinable(struct vm_map *map, struct vm_map_entry *e1,
1267 uvm_mapent_merge(struct vm_map *map, struct vm_map_entry *e1,
1273 * Merging is not supported for map entries that
1286 free = uvm_map_uaddr_e(map, e1);
1287 uvm_mapent_free_remove(map, free, e1);
1289 free = uvm_map_uaddr_e(map, e2);
1290 uvm_mapent_free_remove(map, free, e2);
1291 uvm_mapent_addr_remove(map, e2);
1295 uvm_mapent_free_insert(map, free, e1);
1309 uvm_mapent_tryjoin(struct vm_map *map, struct vm_map_entry *entry,
1317 if (other && uvm_mapent_isjoinable(map, other, entry)) {
1318 merged = uvm_mapent_merge(map, other, entry, dead);
1333 uvm_mapent_isjoinable(map, entry, other)) {
1334 merged = uvm_mapent_merge(map, entry, other, dead);
1343 * Kill entries that are no longer in a map.
1394 uvm_map_mkentry(struct vm_map *map, struct vm_map_entry *first,
1402 KDASSERT(map != NULL);
1410 KDASSERT(uvm_map_isavail(map, NULL, &first, &last, addr, sz));
1411 uvm_tree_sanity(map, __FILE__, __LINE__);
1418 entry = uvm_mapent_alloc(map, flags);
1434 vm_map_assert_wrlock(map);
1437 free = uvm_map_uaddr_e(map, first);
1438 uvm_mapent_free_remove(map, free, first);
1450 free = uvm_map_uaddr_e(map, last);
1451 uvm_mapent_free_remove(map, free, last);
1452 uvm_mapent_addr_remove(map, last);
1457 uvm_mapent_addr_remove(map, first);
1460 uvm_map_fix_space(map, first, VMMAP_FREE_START(first),
1465 uvm_mapent_addr_insert(map, entry);
1466 uvm_map_fix_space(map, entry, min, max, flags);
1468 uvm_tree_sanity(map, __FILE__, __LINE__);
1474 * uvm_mapent_alloc: allocate a map entry
1477 uvm_mapent_alloc(struct vm_map *map, int flags)
1487 if (map->flags & VM_MAP_INTRSAFE || cold) {
1493 panic("uvm_mapent_alloc: cannot allocate map "
1502 "map entries\n");
1509 } else if (map == kernel_map) {
1529 * uvm_mapent_free: free map entry
1531 * => XXX: static pool for kernel map?
1551 * uvm_map_lookup_entry: find map entry at or before an address.
1553 * => map must at least be read-locked by caller
1560 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1563 vm_map_assert_anylock(map);
1565 *entry = uvm_map_entrybyaddr(&map->addr, address);
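A caller sketch patterned on the lookup just below (addr is a hypothetical address to query); the map stays at least read-locked for as long as the returned entry is used:

	struct vm_map_entry *entry;

	vm_map_lock_read(map);
	if (uvm_map_lookup_entry(map, trunc_page(addr), &entry)) {
		/* entry maps addr; only valid while the lock is held */
	}
	vm_map_unlock_read(map);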
1602 vm_map_t map = &p->p_vmspace->vm_map;
1606 if (addr < map->min_offset || addr >= map->max_offset)
1609 /* lock map */
1610 vm_map_lock_read(map);
1613 if (!uvm_map_lookup_entry(map, trunc_page(addr), &entry)) {
1614 vm_map_unlock_read(map);
1620 vm_map_unlock_read(map);
1629 vm_map_unlock_read(map);
1659 * Must be called with map locked.
1662 uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz,
1668 vm_map_assert_anylock(map);
1670 if (!uvm_map_lookup_entry(map, addr, &first))
1709 * Must be called with map unlocked.
1714 vm_map_t map = &p->p_vmspace->vm_map;
1732 if (start < map->min_offset || end >= map->max_offset || end < start)
1742 return uvm_mapanon(map, &start, end - start, 0, flags);
1784 uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
1791 vm_map_lock(map);
1792 uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
1793 vm_map_unlock(map);
1795 if (map->flags & VM_MAP_INTRSAFE)
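Because uvm_unmap() takes and drops the map lock itself and then flushes the dead-entry queue, a caller sketch is a single call on an unlocked map (va and sz are hypothetical, from an earlier mapping):

	uvm_unmap(map, va, va + sz);	/* map must not already be locked */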
1809 uvm_mapent_mkfree(struct vm_map *map, struct vm_map_entry *entry,
1818 UVM_MAP_REQ_WRITE(map);
1836 free = uvm_map_uaddr_e(map, entry);
1837 uvm_mapent_free_remove(map, free, entry);
1838 uvm_mapent_addr_remove(map, entry);
1843 free = uvm_map_uaddr_e(map, prev);
1844 uvm_mapent_free_remove(map, free, prev);
1846 *prev_ptr = uvm_map_fix_space(map, prev, addr, end, 0);
1851 * Unwire and release referenced amap and object from map entry.
1854 uvm_unmap_kill_entry_withlock(struct vm_map *map, struct vm_map_entry *entry,
1857 /* Unwire removed map entry. */
1860 uvm_fault_unwire_locked(map, entry->start, entry->end);
1869 } else if (map->flags & VM_MAP_INTRSAFE) {
1870 KASSERT(vm_map_pmap(map) == pmap_kernel());
1875 KASSERT(vm_map_pmap(map) == pmap_kernel());
1914 pmap_remove(map->pmap, entry->start, entry->end);
1922 uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
1924 uvm_unmap_kill_entry_withlock(map, entry, 0);
1935 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1941 start = MAX(start, map->min_offset);
1942 end = MIN(end, map->max_offset);
1946 vm_map_assert_wrlock(map);
1949 entry = uvm_map_entrybyaddr(&map->addr, start);
1972 UVM_MAP_CLIP_START(map, entry, start);
1982 UVM_MAP_CLIP_END(map, entry, end);
1995 if (UVM_ET_ISSTACK(entry) && (map->flags & VM_MAP_ISVMSPACE))
1996 map->sserial++;
1999 uvm_unmap_kill_entry_withlock(map, entry, 1);
2002 if ((map->flags & VM_MAP_ISVMSPACE) &&
2006 ((struct vmspace *)map)->vm_dused -=
2007 uvmspace_dused(map, entry->start, entry->end);
2010 map->size -= entry->end - entry->start;
2013 uvm_mapent_mkfree(map, entry, &prev_hint, dead, markfree);
2016 pmap_update(vm_map_pmap(map));
2020 for (entry = uvm_map_entrybyaddr(&map->addr, start);
2030 KDASSERT(uvm_map_entrybyaddr(&map->addr, a) == NULL);
2042 uvm_map_pageable_pgon(struct vm_map *map, struct vm_map_entry *first,
2054 uvm_fault_unwire_locked(map, iter->start, iter->end);
2065 uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
2079 * to be created. then we clip each map entry to the region to
2082 * 2: we mark the map busy, unlock it and call uvm_fault_wire to fault
2094 * - create an anonymous map for copy-on-write
2095 * - anonymous map for zero-fill
2102 amap_copy(map, iter, M_WAITOK,
2113 timestamp_save = map->timestamp;
2115 vm_map_busy(map);
2116 vm_map_unlock(map);
2125 error = uvm_fault_wire(map, iter->start, iter->end,
2129 vm_map_lock(map);
2130 vm_map_unbusy(map);
2134 if (timestamp_save != map->timestamp)
2135 panic("uvm_map_pageable_wire: stale map");
2151 uvm_fault_unwire_locked(map,
2167 vm_map_unlock(map);
2173 vm_map_unlock(map);
2176 if (timestamp_save != map->timestamp)
2177 panic("uvm_map_pageable_wire: stale map");
2184 * uvm_map_pageable: set pageability of a range in a map.
2187 * UVM_LK_ENTER: map is already locked by caller
2188 * UVM_LK_EXIT: don't unlock map on exit
2194 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2207 if (start < map->min_offset)
2209 if (end > map->max_offset)
2212 KASSERT(map->flags & VM_MAP_PAGEABLE);
2214 vm_map_lock(map);
2222 first = uvm_map_entrybyaddr(&map->addr, start);
2252 last = RBT_MAX(uvm_map_addr, &map->addr);
2269 UVM_MAP_CLIP_START(map, first, start);
2276 UVM_MAP_CLIP_END(map, last, end);
2281 uvm_map_pageable_pgon(map, first, tmp, start, end);
2286 vm_map_unlock(map);
2294 UVM_MAP_CLIP_START(map, first, start);
2301 UVM_MAP_CLIP_END(map, last, end);
2306 return uvm_map_pageable_wire(map, first, tmp, start, end,
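A hedged caller sketch, assuming the elided parameters are the usual new_pageable boolean plus a lock flag (0, or one of the UVM_LK_* values described above); FALSE wires the range, TRUE unwires it (start/end hypothetical, page-aligned):

	int error;

	error = uvm_map_pageable(map, start, end, FALSE, 0);	/* wire */
	if (error == 0)
		error = uvm_map_pageable(map, start, end, TRUE, 0);	/* unwire */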
2319 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
2324 KASSERT(map->flags & VM_MAP_PAGEABLE);
2325 vm_map_lock(map);
2328 uvm_map_pageable_pgon(map, RBT_MIN(uvm_map_addr, &map->addr),
2329 NULL, map->min_offset, map->max_offset);
2331 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
2332 vm_map_unlock(map);
2337 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
2339 vm_map_unlock(map);
2348 RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
2356 vm_map_unlock(map);
2363 size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit) {
2364 vm_map_unlock(map);
2372 return uvm_map_pageable_wire(map, RBT_MIN(uvm_map_addr, &map->addr),
2373 NULL, map->min_offset, map->max_offset, 0);
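The whole-map variant takes the MCL_* flags familiar from mlockall(2) plus a wiring limit in bytes; a sketch, with limit standing in for something like the RLIMIT_MEMLOCK soft limit:

	int error;

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
	    MCL_CURRENT | MCL_FUTURE, limit);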
2377 * Initialize map.
2379 * Allocates sufficient entries to describe the free memory in the map.
2382 uvm_map_setup(struct vm_map *map, pmap_t pmap, vaddr_t min, vaddr_t max,
2404 RBT_INIT(uvm_map_addr, &map->addr);
2405 map->uaddr_exe = NULL;
2406 for (i = 0; i < nitems(map->uaddr_any); ++i)
2407 map->uaddr_any[i] = NULL;
2408 map->uaddr_brk_stack = NULL;
2410 map->pmap = pmap;
2411 map->size = 0;
2412 map->ref_count = 0;
2413 map->min_offset = min;
2414 map->max_offset = max;
2415 map->b_start = map->b_end = 0; /* Empty brk() area by default. */
2416 map->s_start = map->s_end = 0; /* Empty stack area by default. */
2417 map->flags = flags;
2418 map->timestamp = 0;
2419 map->busy = NULL;
2421 rw_init_flags(&map->lock, "vmmaplk", RWL_DUPOK);
2423 rw_init(&map->lock, "kmmaplk");
2424 mtx_init(&map->mtx, IPL_VM);
2425 mtx_init(&map->flags_lock, IPL_VM);
2429 uvm_map_setup_md(map);
2431 map->uaddr_any[3] = &uaddr_kbootstrap;
2434 * Fill map entries.
2435 * We do not need to write-lock the map here because only the current
2439 uvm_map_setup_entries(map);
2440 uvm_tree_sanity(map, __FILE__, __LINE__);
2441 map->ref_count = 1;
2445 * Destroy the map.
2450 uvm_map_teardown(struct vm_map *map)
2459 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
2461 vm_map_lock(map);
2464 uvm_addr_destroy(map->uaddr_exe);
2465 map->uaddr_exe = NULL;
2466 for (i = 0; i < nitems(map->uaddr_any); i++) {
2467 uvm_addr_destroy(map->uaddr_any[i]);
2468 map->uaddr_any[i] = NULL;
2470 uvm_addr_destroy(map->uaddr_brk_stack);
2471 map->uaddr_brk_stack = NULL;
2491 if ((entry = RBT_ROOT(uvm_map_addr, &map->addr)) != NULL)
2495 uvm_unmap_kill_entry(map, entry);
2504 vm_map_unlock(map);
2508 RBT_FOREACH(entry, uvm_map_addr, &map->addr)
2516 pmap_destroy(map->pmap);
2517 map->pmap = NULL;
2521 * Populate map with free-memory entries.
2526 uvm_map_setup_entries(struct vm_map *map)
2528 KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
2530 uvm_map_fix_space(map, NULL, map->min_offset, map->max_offset, 0);
2537 * next: a newly allocated map entry that is not linked.
2541 uvm_map_splitentry(struct vm_map *map, struct vm_map_entry *orig,
2551 KDASSERT(map != NULL && orig != NULL && next != NULL);
2552 uvm_tree_sanity(map, __FILE__, __LINE__);
2556 KDASSERT(RBT_FIND(uvm_map_addr, &map->addr, orig) == orig);
2557 KDASSERT(RBT_FIND(uvm_map_addr, &map->addr, next) != next);
2563 free = uvm_map_uaddr_e(map, orig);
2564 uvm_mapent_free_remove(map, free, orig);
2614 free_before = uvm_map_uaddr_e(map, orig);
2615 uvm_mapent_free_insert(map, free_before, orig);
2616 uvm_mapent_addr_insert(map, next);
2617 uvm_mapent_free_insert(map, free, next);
2619 uvm_tree_sanity(map, __FILE__, __LINE__);
2626 uvm_tree_assert(struct vm_map *map, int test, char *test_str,
2634 if (map == kernel_map)
2636 else if (map == kmem_map)
2640 panic("uvm_tree_sanity %p%s (%s %d): %s", map, map_special, file,
2645 * Check that map is sane.
2648 uvm_tree_sanity(struct vm_map *map, char *file, int line)
2655 addr = vm_map_min(map);
2656 RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
2661 UVM_ASSERT(map, iter->end >= iter->start, file, line);
2662 UVM_ASSERT(map, VMMAP_FREE_END(iter) >= iter->end, file, line);
2665 UVM_ASSERT(map, iter->start < VMMAP_FREE_END(iter),
2668 /* Addresses for entry must lie within map boundaries. */
2669 UVM_ASSERT(map, iter->start >= vm_map_min(map) &&
2670 VMMAP_FREE_END(iter) <= vm_map_max(map), file, line);
2673 UVM_ASSERT(map, iter->start == addr, file, line);
2684 (bound = uvm_map_boundary(map, min, max)) != max) {
2685 UVM_ASSERT(map,
2686 uvm_map_uaddr(map, bound - 1) ==
2687 uvm_map_uaddr(map, bound),
2692 free = uvm_map_uaddr_e(map, iter);
2694 UVM_ASSERT(map, (iter->etype & UVM_ET_FREEMAPPED) != 0,
2697 UVM_ASSERT(map, (iter->etype & UVM_ET_FREEMAPPED) == 0,
2701 UVM_ASSERT(map, addr == vm_map_max(map), file, line);
2705 uvm_tree_size_chk(struct vm_map *map, char *file, int line)
2711 RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
2716 if (map->size != size)
2717 printf("map size = 0x%lx, should be 0x%lx\n", map->size, size);
2718 UVM_ASSERT(map, map->size == size, file, line);
2720 vmspace_validate(map);
2727 vmspace_validate(struct vm_map *map)
2735 if (!(map->flags & VM_MAP_ISVMSPACE))
2738 vm = (struct vmspace *)map;
2743 RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
2779 "expected %ld pgs, got %d pgs in map %p",
2781 map);
2797 /* now set up static pool of kernel map entries ... */
2805 /* initialize the map-related pools. */
2824 * uvm_map_printit: actually prints the map
2827 uvm_map_printit(struct vm_map *map, boolean_t full,
2836 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
2838 map->b_start, map->b_end);
2840 map->s_start, map->s_end);
2842 map->size, map->ref_count, map->timestamp,
2843 map->flags);
2844 (*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
2845 pmap_resident_count(map->pmap));
2848 if (map->flags & VM_MAP_ISVMSPACE) {
2849 vm = (struct vmspace *)map;
2863 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
2878 free = uvm_map_uaddr_e(map, entry);
2897 uvm_addr_print(map->uaddr_exe, "exe", full, pr);
2898 for (i = 0; i < nitems(map->uaddr_any); i++) {
2900 uvm_addr_print(map->uaddr_any[i], &buf[0], full, pr);
2902 uvm_addr_print(map->uaddr_brk_stack, "brk/stack", full, pr);
3031 * uvm_map_protect: change map protection
3034 * => map must be unlocked.
3037 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3050 start = MAX(start, map->min_offset);
3051 end = MIN(end, map->max_offset);
3057 vm_map_lock(map);
3063 first = uvm_map_entrybyaddr(&map->addr, start);
3082 map, MAX(start, iter->start), MIN(end, iter->end));
3093 if (map == kernel_map &&
3095 panic("uvm_map_protect: kernel map W^X violation requested");
3099 if (dused > 0 && (map->flags & VM_MAP_ISVMSPACE)) {
3103 limit - dused < ptoa(((struct vmspace *)map)->vm_dused)) {
3135 UVM_MAP_CLIP_START(map, iter, start);
3136 UVM_MAP_CLIP_END(map, iter, end);
3146 * update physical map if necessary. worry about copy-on-write
3153 if (map->flags & VM_MAP_ISVMSPACE) {
3155 ((struct vmspace *)map)->vm_dused +=
3156 uvmspace_dused(map, iter->start,
3160 ((struct vmspace *)map)->vm_dused -=
3161 uvmspace_dused(map, iter->start,
3184 pmap_protect(map->pmap, iter->start, iter->end,
3190 * If the map is configured to lock any future mappings,
3194 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3198 if (uvm_map_pageable(map, iter->start, iter->end,
3204 * the map, but it will return the resource
3217 pmap_update(map->pmap);
3221 map->sserial++;
3222 vm_map_unlock(map);
3296 struct vm_map *map = &ovm->vm_map;
3326 vm_map_lock(map);
3327 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE |
3333 * Instead of attempting to keep the map valid, we simply
3335 * the map to the new boundaries.
3340 uvm_unmap_remove(map, map->min_offset, map->max_offset,
3343 KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
3356 /* Setup new boundaries and populate map with entries. */
3357 map->min_offset = start;
3358 map->max_offset = end;
3359 uvm_map_setup_entries(map);
3360 vm_map_unlock(map);
3371 (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE, TRUE);
3406 * lock the map, to wait out all other references to it. delete
3481 panic("uvm_share: non-copy_on_write map entries "
3485 * srcaddr > map entry start? means we are in the middle of a
3486 * map, so we calculate the offset to use in the source map.
3493 panic("uvm_share: map entry start > srcaddr");
3527 * Clone map entry into other map.
3546 panic("uvm_mapent_clone: no space in map for "
3547 "entry in empty map");
3563 /* gain reference to object backing the map (can't be a submap). */
3794 * uvmspace_fork: fork a process' main map
3797 * => parent's map must not be locked.
3833 panic("fork: non-copy_on_write map entry marked "
3936 * uvm_map_submap: punch down part of a map into a submap
3940 * of a larger map
3942 * call [with uobj==NULL] to create a blank map entry in the main map.
3945 * => to remove a submap, use uvm_unmap() on the main map
3947 * => main map must be unlocked.
3952 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3958 if (start > map->max_offset || end > map->max_offset ||
3959 start < map->min_offset || end < map->min_offset)
3962 vm_map_lock(map);
3964 if (uvm_map_lookup_entry(map, start, &entry)) {
3965 UVM_MAP_CLIP_START(map, entry, start);
3966 UVM_MAP_CLIP_END(map, entry, end);
3982 vm_map_unlock(map);
3987 * uvm_map_checkprot: check protection in map
3990 * => map must be read or write locked by caller.
3993 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3998 vm_map_assert_anylock(map);
4000 if (start < map->min_offset || end > map->max_offset || start > end)
4008 for (entry = uvm_map_entrybyaddr(&map->addr, start);
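A caller sketch, assuming the elided fourth parameter is the vm_prot_t to test for; the map must already be locked, as the comment above says:

	boolean_t ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, PROT_READ | PROT_WRITE);
	vm_map_unlock_read(map);
	if (!ok)
		return (EFAULT);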
4024 * uvm_map_create: create map
4029 vm_map_t map;
4031 map = malloc(sizeof *map, M_VMMAP, M_WAITOK);
4032 uvm_map_setup(map, pmap, min, max, flags);
4033 return (map);
4037 * uvm_map_deallocate: drop reference to a map
4039 * => caller must not lock map
4040 * => we will zap map if ref count goes to zero
4043 uvm_map_deallocate(vm_map_t map)
4048 c = atomic_dec_int_nv(&map->ref_count);
4056 * No lock required: we are only one to access this map.
4059 uvm_tree_sanity(map, __FILE__, __LINE__);
4060 vm_map_lock(map);
4061 uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
4063 vm_map_unlock(map);
4064 pmap_destroy(map->pmap);
4065 KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
4066 free(map, M_VMMAP, sizeof *map);
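Taken together with uvm_map_reference() higher up, the reference-counting lifecycle looks roughly like this (min/max are hypothetical bounds, and pmap_create() is assumed as the source of the pmap argument; uvm_map_setup() leaves ref_count at 1):

	struct vm_map *map;

	map = uvm_map_create(pmap_create(), min, max, VM_MAP_PAGEABLE);
	uvm_map_reference(map);		/* second holder: ref_count 1 -> 2 */
	/* ... both holders use the map ... */
	uvm_map_deallocate(map);	/* drops to 1 */
	uvm_map_deallocate(map);	/* drops to 0: entries freed, pmap destroyed */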
4072 * uvm_map_inherit: set inheritance code for range of addrs in map.
4074 * => map must be unlocked
4079 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
4097 start = MAX(start, map->min_offset);
4098 end = MIN(end, map->max_offset);
4102 vm_map_lock(map);
4104 entry = uvm_map_entrybyaddr(&map->addr, start);
4106 UVM_MAP_CLIP_START(map, entry, start);
4122 UVM_MAP_CLIP_END(map, entry, end);
4129 vm_map_unlock(map);
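A minherit(2)-style sketch, assuming the elided fourth parameter is the vm_inherit_t value (start/end hypothetical, page-aligned):

	int error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, start, end,
	    MAP_INHERIT_NONE);	/* do not pass this range to fork(2) children */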
4135 check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4138 map->check_copyin_count >= UVM_MAP_CHECK_COPYIN_MAX)
4140 vm_map_assert_wrlock(map);
4141 map->check_copyin[map->check_copyin_count].start = start;
4142 map->check_copyin[map->check_copyin_count].end = end;
4144 map->check_copyin_count++;
4151 * => map must be unlocked
4154 uvm_map_check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4158 start = MAX(start, map->min_offset);
4159 end = MIN(end, map->max_offset);
4162 vm_map_lock(map);
4163 check_copyin_add(map, start, end);
4164 vm_map_unlock(map);
4170 * uvm_map_immutable: block mapping/mprotect for range of addrs in map.
4172 * => map must be unlocked
4175 uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut)
4182 start = MAX(start, map->min_offset);
4183 end = MIN(end, map->max_offset);
4187 vm_map_lock(map);
4189 entry = uvm_map_entrybyaddr(&map->addr, start);
4191 UVM_MAP_CLIP_START(map, entry, start);
4204 UVM_MAP_CLIP_END(map, entry, end);
4213 vm_map_unlock(map);
4218 * uvm_map_advice: set advice code for range of addrs in map.
4220 * => map must be unlocked
4223 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
4238 start = MAX(start, map->min_offset);
4239 end = MIN(end, map->max_offset);
4243 vm_map_lock(map);
4245 entry = uvm_map_entrybyaddr(&map->addr, start);
4247 UVM_MAP_CLIP_START(map, entry, start);
4255 UVM_MAP_CLIP_END(map, entry, end);
4260 vm_map_unlock(map);
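The prototype is fully visible above; a madvise(2)-style caller sketch (start/end hypothetical, page-aligned):

	int error;

	error = uvm_map_advice(&p->p_vmspace->vm_map, start, end,
	    MADV_SEQUENTIAL);	/* expect sequential access in this range */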
4265 * uvm_map_extract: extract a mapping from a map and put it somewhere
4268 * => map should be unlocked (we will write lock it and kernel_map)
4294 * mapped area is not in source map.
4343 /* Lock destination map (kernel_map). */
4358 /* step 1: start looping through map entries, performing extraction. */
4414 * uvm_map_clean: clean out a map range
4425 * => caller must not have map locked
4429 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4444 if (start > end || start < map->min_offset || end > map->max_offset)
4447 vm_map_lock(map);
4448 first = uvm_map_entrybyaddr(&map->addr, start);
4456 vm_map_unlock(map);
4463 vm_map_unlock(map);
4468 vm_map_busy(map);
4469 vm_map_unlock(map);
4488 vm_map_unbusy(map);
4581 vm_map_unbusy(map);
4589 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
4594 tmp = uvm_mapent_alloc(map, 0);
4597 uvm_map_splitentry(map, entry, tmp, addr);
4610 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
4616 free = uvm_map_uaddr_e(map, entry);
4617 uvm_mapent_free_remove(map, free, entry);
4618 uvm_mapent_addr_remove(map, entry);
4622 tmp = uvm_mapent_alloc(map, 0);
4626 uvm_mapent_addr_insert(map, tmp);
4627 uvm_mapent_free_insert(map, free, tmp);
4630 uvm_map_splitentry(map, tmp, entry, addr);
4652 uvm_map_uaddr(struct vm_map *map, vaddr_t addr)
4662 if ((map->flags & VM_MAP_ISVMSPACE) == 0) {
4667 /* Is the address inside the exe-only map? */
4668 if (map->uaddr_exe != NULL && addr >= map->uaddr_exe->uaddr_minaddr &&
4669 addr < map->uaddr_exe->uaddr_maxaddr)
4670 return map->uaddr_exe;
4673 if ((addr >= map->b_start && addr < map->b_end) ||
4674 (addr >= map->s_start && addr < map->s_end)) {
4675 if (map->uaddr_brk_stack != NULL &&
4676 addr >= map->uaddr_brk_stack->uaddr_minaddr &&
4677 addr < map->uaddr_brk_stack->uaddr_maxaddr) {
4678 return map->uaddr_brk_stack;
4689 for (i = 0; i < nitems(map->uaddr_any); i++) {
4690 uaddr = map->uaddr_any[i];
4713 uvm_map_uaddr_e(struct vm_map *map, struct vm_map_entry *entry)
4715 return uvm_map_uaddr(map, VMMAP_FREE_START(entry));
4722 uvm_map_boundary(struct vm_map *map, vaddr_t min, vaddr_t max)
4730 /* Treat the maxkaddr special, if the map is a kernel_map. */
4731 if ((map->flags & VM_MAP_ISVMSPACE) == 0)
4735 if (map->uaddr_exe != NULL) {
4736 max = uvm_map_boundfix(min, max, map->uaddr_exe->uaddr_minaddr);
4737 max = uvm_map_boundfix(min, max, map->uaddr_exe->uaddr_maxaddr);
4741 if (map->uaddr_brk_stack != NULL) {
4743 map->uaddr_brk_stack->uaddr_minaddr);
4745 map->uaddr_brk_stack->uaddr_maxaddr);
4749 for (i = 0; i < nitems(map->uaddr_any); i++) {
4750 uaddr = map->uaddr_any[i];
4758 max = uvm_map_boundfix(min, max, map->s_start);
4759 max = uvm_map_boundfix(min, max, map->s_end);
4760 max = uvm_map_boundfix(min, max, map->b_start);
4761 max = uvm_map_boundfix(min, max, map->b_end);
4767 * Update map allocation start and end addresses from proc vmspace.
4770 uvm_map_vmspace_update(struct vm_map *map,
4776 KASSERT(map->flags & VM_MAP_ISVMSPACE);
4782 vm = (struct vmspace *)map;
4798 if (__predict_true(map->b_start == b_start && map->b_end == b_end &&
4799 map->s_start == s_start && map->s_end == s_end))
4802 uvm_map_freelist_update(map, dead, b_start, b_end,
4811 * If the map has a gap that is large enough to accommodate alloc_sz, this
4812 * function will make sure map->free will include it.
4815 uvm_map_kmem_grow(struct vm_map *map, struct uvm_map_deadq *dead,
4823 KASSERT((map->flags & VM_MAP_ISVMSPACE) == 0);
4825 uvm_map_freelist_update_clear(map, dead);
4828 if (map->flags & VM_MAP_GUARDPAGES)
4842 * We assume the kernel map has no boundaries.
4845 end = MAX(uvm_maxkaddr, map->min_offset);
4846 entry = uvm_map_entrybyaddr(&map->addr, end);
4851 end += MIN(sz, map->max_offset - end);
4853 end = map->max_offset;
4863 uvm_map_freelist_update_refill(map, flags);
4870 uvm_map_freelist_update_clear(struct vm_map *map, struct uvm_map_deadq *dead)
4876 for (entry = RBT_MIN(uvm_map_addr, &map->addr); entry != NULL;
4880 free = uvm_map_uaddr_e(map, entry);
4881 uvm_mapent_free_remove(map, free, entry);
4885 uvm_mapent_addr_remove(map, entry);
4896 uvm_map_freelist_update_refill(struct vm_map *map, int flags)
4901 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
4906 entry = uvm_map_fix_space(map, entry, min, max, flags);
4909 uvm_tree_sanity(map, __FILE__, __LINE__);
4916 uvm_map_freelist_update(struct vm_map *map, struct uvm_map_deadq *dead,
4920 vm_map_assert_wrlock(map);
4923 uvm_map_freelist_update_clear(map, dead);
4926 map->b_start = b_start;
4927 map->b_end = b_end;
4928 map->s_start = s_start;
4929 map->s_end = s_end;
4932 uvm_map_freelist_update_refill(map, flags);
4941 uvm_map_set_uaddr(struct vm_map *map, struct uvm_addr_state **which,
4946 /* Pointer which must be in this map. */
4948 KASSERT((void*)map <= (void*)(which) &&
4949 (void*)(which) < (void*)(map + 1));
4951 vm_map_lock(map);
4953 uvm_map_freelist_update_clear(map, &dead);
4958 uvm_map_freelist_update_refill(map, 0);
4959 vm_map_unlock(map);
4969 uvm_map_fix_space(struct vm_map *map, struct vm_map_entry *entry,
4978 min == map->min_offset);
4980 UVM_MAP_REQ_WRITE(map);
4987 uvm_map_uaddr_e(map, entry));
4991 if ((map->flags & VM_MAP_GUARDPAGES) && entry != NULL &&
4998 * Because in a guarded map, an area needs
5022 lmax = uvm_map_boundary(map, min, max);
5023 free = uvm_map_uaddr(map, min);
5032 !((map->flags & VM_MAP_ISVMSPACE) == 0 &&
5044 uvm_mapent_free_insert(map, entfree, entry);
5047 entry = uvm_mapent_alloc(map, flags);
5061 uvm_mapent_addr_insert(map, entry);
5070 uvm_mapent_free_insert(map, entfree, entry);
5078 * This allocator searches forward until sufficient space is found to map
5084 uvm_map_mquery(struct vm_map *map, vaddr_t *addr_p, vsize_t sz, voff_t offset,
5093 vm_map_lock_read(map);
5113 entry = uvm_map_entrybyaddr(&map->addr, addr);
5115 if (uvm_map_isavail(map, NULL, &entry, &last, addr, sz)) {
5129 * [1] it's outside the map,
5138 /* [1] Outside the map. */
5139 if (addr >= map->max_offset)
5142 entry = RBT_MIN(uvm_map_addr, &map->addr);
5164 if (addr + sz > map->b_start && addr < map->b_end) {
5165 if (VMMAP_FREE_END(entry) > map->b_end) {
5166 addr = map->b_end;
5172 if (addr + sz > map->s_start && addr < map->s_end) {
5173 if (VMMAP_FREE_END(entry) > map->s_end) {
5174 addr = map->s_end;
5181 if (uvm_map_isavail(map, NULL, &entry, &last, addr, sz)) {
5188 vm_map_unlock_read(map);
5195 vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
5199 if (map->flags & VM_MAP_INTRSAFE) {
5200 if (!mtx_enter_try(&map->mtx))
5205 mtx_enter(&map->flags_lock);
5206 busy = map->busy;
5207 mtx_leave(&map->flags_lock);
5211 rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
5216 mtx_enter(&map->flags_lock);
5217 busy = map->busy;
5218 mtx_leave(&map->flags_lock);
5220 rw_exit(&map->lock);
5225 map->timestamp++;
5226 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5227 uvm_tree_sanity(map, file, line);
5228 uvm_tree_size_chk(map, file, line);
5234 vm_map_lock_ln(struct vm_map *map, char *file, int line)
5236 if ((map->flags & VM_MAP_INTRSAFE) == 0) {
5237 mtx_enter(&map->flags_lock);
5239 while (map->busy != NULL && map->busy != curproc) {
5240 map->nbusy++;
5241 msleep_nsec(&map->busy, &map->flags_lock,
5243 map->nbusy--;
5245 mtx_leave(&map->flags_lock);
5247 rw_enter_write(&map->lock);
5250 mtx_enter(&map->flags_lock);
5251 if (map->busy != NULL && map->busy != curproc) {
5253 rw_exit_write(&map->lock);
5259 mtx_leave(&map->flags_lock);
5261 mtx_enter(&map->mtx);
5264 if (map->busy != curproc) {
5265 KASSERT(map->busy == NULL);
5266 map->timestamp++;
5268 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5269 uvm_tree_sanity(map, file, line);
5270 uvm_tree_size_chk(map, file, line);
5274 vm_map_lock_read_ln(struct vm_map *map, char *file, int line)
5276 if ((map->flags & VM_MAP_INTRSAFE) == 0)
5277 rw_enter_read(&map->lock);
5279 mtx_enter(&map->mtx);
5280 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5281 uvm_tree_sanity(map, file, line);
5282 uvm_tree_size_chk(map, file, line);
5286 vm_map_unlock_ln(struct vm_map *map, char *file, int line)
5288 KASSERT(map->busy == NULL || map->busy == curproc);
5289 uvm_tree_sanity(map, file, line);
5290 uvm_tree_size_chk(map, file, line);
5291 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5292 if ((map->flags & VM_MAP_INTRSAFE) == 0)
5293 rw_exit(&map->lock);
5295 mtx_leave(&map->mtx);
5299 vm_map_unlock_read_ln(struct vm_map *map, char *file, int line)
5301 /* XXX: RO */ uvm_tree_sanity(map, file, line);
5302 /* XXX: RO */ uvm_tree_size_chk(map, file, line);
5303 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5304 if ((map->flags & VM_MAP_INTRSAFE) == 0)
5305 rw_exit_read(&map->lock);
5307 mtx_leave(&map->mtx);
5311 vm_map_busy_ln(struct vm_map *map, char *file, int line)
5313 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
5314 KASSERT(rw_write_held(&map->lock));
5315 KASSERT(map->busy == NULL);
5317 mtx_enter(&map->flags_lock);
5318 map->busy = curproc;
5319 mtx_leave(&map->flags_lock);
5323 vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
5327 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
5328 KASSERT(map->busy == curproc);
5330 mtx_enter(&map->flags_lock);
5331 nbusy = map->nbusy;
5332 map->busy = NULL;
5333 mtx_leave(&map->flags_lock);
5336 wakeup(&map->busy);
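The busy/unbusy pair keeps other writers out while the lock is dropped for a sleeping operation, as uvm_map_pageable_wire() and uvm_map_clean() do above; the pattern, sketched:

	vm_map_lock(map);
	/* ... set up while write-locked ... */
	vm_map_busy(map);	/* mark busy: later lockers will wait */
	vm_map_unlock(map);
	/* ... work that may sleep or fault ... */
	vm_map_lock(map);
	vm_map_unbusy(map);	/* wakes anyone sleeping on &map->busy */
	vm_map_unlock(map);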
5340 vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
5342 LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
5343 if ((map->flags & VM_MAP_INTRSAFE) == 0)
5344 rw_assert_anylock(&map->lock);
5346 MUTEX_ASSERT_LOCKED(&map->mtx);
5350 vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
5352 LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
5353 if ((map->flags & VM_MAP_INTRSAFE) == 0) {
5355 rw_assert_wrlock(&map->lock);
5357 MUTEX_ASSERT_LOCKED(&map->mtx);
5362 uvm_map_fill_vmmap(struct vm_map *map, struct kinfo_vmentry *kve,
5382 vm_map_lock(map);
5383 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
5406 vm_map_unlock(map);
5426 uvm_map_setup_md(struct vm_map *map)
5430 min = map->min_offset;
5431 max = map->max_offset;
5442 map->uaddr_exe = uaddr_rnd_create(min, I386_MAX_EXE_ADDR);
5444 map->uaddr_any[3] = uaddr_pivot_create(2 * I386_MAX_EXE_ADDR, max);
5446 map->uaddr_any[0] = uaddr_rnd_create(min, max);
5450 map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
5455 uvm_map_setup_md(struct vm_map *map)
5459 min = map->min_offset;
5460 max = map->max_offset;
5470 map->uaddr_any[3] = uaddr_pivot_create(MAX(min, 0x100000000ULL), max);
5472 map->uaddr_any[0] = uaddr_rnd_create(min, max);
5476 map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
5481 uvm_map_setup_md(struct vm_map *map)
5485 min = map->min_offset;
5486 max = map->max_offset;
5496 map->uaddr_any[3] = uaddr_pivot_create(min, max);
5498 map->uaddr_any[0] = uaddr_rnd_create(min, max);
5502 map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);