Lines Matching defs:map

65  * uvm_map.c: uvm map operations
147 * cache for dynamically-allocated map entries.
196 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
212 * uvm_map_entry_link: insert entry into a map
214 * => map must be locked
216 #define uvm_map_entry_link(map, after_where, entry) do { \
218 (map)->nentries++; \
223 uvm_rb_insert((map), (entry)); \
227 * uvm_map_entry_unlink: remove entry from a map
229 * => map must be locked
231 #define uvm_map_entry_unlink(map, entry) do { \
232 KASSERT((entry) != (map)->first_free); \
233 KASSERT((entry) != (map)->hint); \
235 (map)->nentries--; \
238 uvm_rb_remove((map), (entry)); \
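
The link/unlink macros are the only place where the map's entry list, entry count, and red-black tree are updated together. A condensed sketch of how uvm_map_enter() (further down in this listing) uses the link side once it has built a new entry; this is a sketch of the idiom, not the verbatim code:

    /* sketch: map is write-locked; insert a freshly built entry */
    new_entry = uvm_mapent_alloc(map, 0);
    /* ... fill in new_entry->start, end, object, protection ... */
    uvm_map_entry_link(map, prev_entry, new_entry);
    /* nentries was bumped and the entry went into the rb tree */
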
244 * => map need not be locked.
246 #define SAVE_HINT(map, check, value) do { \
247 if ((map)->hint == (check)) \
248 (map)->hint = (value); \
254 * => map must be write-locked.
257 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
260 SAVE_HINT(map, ent, ent->prev);
261 if (map->first_free == ent) {
262 map->first_free = ent->prev;
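
SAVE_HINT only replaces the hint when it still points at the entry being invalidated, so it is cheap to use wherever an entry goes away; clear_hints() additionally backs first_free up to the previous entry. The idiom from uvm_unmap_remove() further down, sketched:

    /* sketch: about to unlink "entry" (map write-locked) */
    SAVE_HINT(map, entry, entry->prev);    /* hint must not dangle */
    uvm_map_entry_unlink(map, entry);      /* KASSERTs hint != entry */
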
269 * => map must at least be read locked
272 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
273 if (start < vm_map_min(map)) \
274 start = vm_map_min(map); \
275 if (end > vm_map_max(map)) \
276 end = vm_map_max(map); \
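
VM_MAP_RANGE_CHECK silently clamps a caller-supplied range into the map's valid address range instead of failing; the attribute-changing entry points below (uvm_map_protect, uvm_map_inherit, uvm_map_advice) all invoke it right after taking the map lock. The usual shape, sketched:

    /* sketch: typical caller pattern inside this file */
    vm_map_lock(map);
    VM_MAP_RANGE_CHECK(map, start, end);   /* clamp to [vm_map_min, vm_map_max] */
    if (uvm_map_lookup_entry(map, start, &entry)) {
        /* ... clip and walk the entries covering [start, end) ... */
    }
    vm_map_unlock(map);
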
291 #define uvm_mapent_check(map) _uvm_mapent_check(map, __LINE__)
315 #define ROOT_ENTRY(map) \
316 ((struct vm_map_entry *)(map)->rb_tree.rbt_root)
321 #define PARENT_ENTRY(map, entry) \
322 (ROOT_ENTRY(map) == (entry) \
404 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
411 while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
439 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
444 if (entry->prev != &map->header)
447 ret = rb_tree_insert_node(&map->rb_tree, entry);
449 "uvm_rb_insert: map %p: duplicate entry %p", map, ret);
454 * have to check entry->prev against &map->header since &map->header
457 uvm_rb_fixup(map,
462 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
472 if (entry->prev != &map->header)
473 prev_parent = PARENT_ENTRY(map, entry->prev);
474 if (entry->next != &map->header)
475 next_parent = PARENT_ENTRY(map, entry->next);
477 rb_tree_remove_node(&map->rb_tree, entry);
483 if (entry->prev != &map->header) {
488 uvm_rb_fixup(map, entry->prev);
491 && prev_parent != PARENT_ENTRY(map, entry->prev))
492 uvm_rb_fixup(map, prev_parent);
499 if (entry->next != &map->header) {
500 uvm_rb_fixup(map, entry->next);
503 && next_parent != PARENT_ENTRY(map, entry->next))
504 uvm_rb_fixup(map, next_parent);
511 #define uvm_map_check(map, name) \
512 _uvm_map_check((map), (name), __FILE__, __LINE__)
514 _uvm_map_check(struct vm_map *map, const char *name,
518 if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
519 (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
520 panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
521 name, map, file, line);
525 #define uvm_map_check(map, name) /* nothing */
530 _uvm_map_sanity(struct vm_map *map)
535 struct vm_map_entry *hint = map->hint;
537 e = &map->header;
539 if (map->first_free == e) {
543 map->first_free, e);
551 if (e == &map->header) {
567 _uvm_tree_sanity(struct vm_map *map)
572 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
575 n + 1, map->nentries,
577 tmp->next == &map->header ? "(last)" : "");
582 * and will likely exceed the size of the map.
584 if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
591 if (n != map->nentries) {
592 printf("nentries: %d vs %d\n", n, map->nentries);
597 for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
613 for (tmp = map->header.next; tmp != &map->header;
615 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
617 trtmp = &map->header;
623 trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
625 trtmp = &map->header;
631 trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
634 PARENT_ENTRY(map, tmp));
646 * vm_map_lock: acquire an exclusive (write) lock on a map.
649 * exclusive by whichever thread currently has the map marked busy.
656 vm_map_lock(struct vm_map *map)
660 rw_enter(&map->lock, RW_WRITER);
661 if (map->busy == NULL || map->busy == curlwp) {
664 mutex_enter(&map->misc_lock);
665 rw_exit(&map->lock);
666 if (map->busy != NULL) {
667 cv_wait(&map->cv, &map->misc_lock);
669 mutex_exit(&map->misc_lock);
671 map->timestamp++;
675 * vm_map_lock_try: try to lock a map, failing if it is already locked.
679 vm_map_lock_try(struct vm_map *map)
682 if (!rw_tryenter(&map->lock, RW_WRITER)) {
685 if (map->busy != NULL) {
686 rw_exit(&map->lock);
689 map->timestamp++;
694 * vm_map_unlock: release an exclusive lock on a map.
698 vm_map_unlock(struct vm_map *map)
701 KASSERT(rw_write_held(&map->lock));
702 KASSERT(map->busy == NULL || map->busy == curlwp);
703 rw_exit(&map->lock);
707 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
712 vm_map_unbusy(struct vm_map *map)
715 KASSERT(map->busy == curlwp);
724 mutex_enter(&map->misc_lock);
725 map->busy = NULL;
726 cv_broadcast(&map->cv);
727 mutex_exit(&map->misc_lock);
731 * vm_map_lock_read: acquire a shared (read) lock on a map.
735 vm_map_lock_read(struct vm_map *map)
738 rw_enter(&map->lock, RW_READER);
742 * vm_map_unlock_read: release a shared lock on a map.
746 vm_map_unlock_read(struct vm_map *map)
749 rw_exit(&map->lock);
753 * vm_map_busy: mark a map as busy.
755 * => the caller must hold the map write locked
759 vm_map_busy(struct vm_map *map)
762 KASSERT(rw_write_held(&map->lock));
763 KASSERT(map->busy == NULL);
765 map->busy = curlwp;
769 * vm_map_locked_p: return true if the map is write locked.
772 * => should not be used to verify that a map is not locked.
776 vm_map_locked_p(struct vm_map *map)
779 return rw_write_held(&map->lock);
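
Taken together these routines implement the map's lock discipline: an rw lock plus a "busy" owner recorded in map->busy. vm_map_lock() loops until the map is both write-lockable and not busy (or busy by the caller), sleeping on map->cv under misc_lock; vm_map_busy()/vm_map_unbusy() let the write-lock holder pin the map across a long operation without holding the rw lock. A sketch of the pattern uvm_map_pageable() and uvm_map_clean() use below:

    /* sketch: long operation without holding the rw lock throughout */
    vm_map_lock(map);      /* exclusive; waits while someone else is busy */
    vm_map_busy(map);      /* map->busy = curlwp */
    vm_map_unlock(map);    /* readers may proceed; other writers will wait */

    /* ... long operation (e.g. wiring or flushing pages) ... */

    vm_map_lock(map);      /* the busy owner re-enters immediately */
    vm_map_unbusy(map);    /* clears busy and cv_broadcast()s waiters */
    vm_map_unlock(map);
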
783 * uvm_mapent_alloc: allocate a map entry
787 uvm_mapent_alloc(struct vm_map *map, int flags)
800 (map == kernel_map), 0, 0);
805 * uvm_mapent_free: free map entry
812 UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
818 * uvm_mapent_copy: copy a map entry, preserving flags
865 * uvm_map_entry_unwire: unwire a map entry
867 * => map should be locked by caller
871 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
875 uvm_fault_unwire_locked(map, entry->start, entry->end);
918 UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
921 * initialize the global lock for kernel map entry.
947 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
984 * => map must be locked by caller
988 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
993 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
995 uvm_map_check(map, "clip_start entry");
1003 new_entry = uvm_mapent_alloc(map, 0);
1006 uvm_map_entry_link(map, entry->prev, new_entry);
1008 uvm_map_check(map, "clip_start leave");
1017 * => map must be locked by caller
1021 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1025 uvm_map_check(map, "clip_end entry");
1032 new_entry = uvm_mapent_alloc(map, 0);
1035 uvm_map_entry_link(map, entry, new_entry);
1037 uvm_map_check(map, "clip_end leave");
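
Clipping is how an operation is confined to exactly [start, end): the entry containing start is split at start, and each entry crossing end is split at end while walking. The UVM_MAP_CLIP_START/END wrappers only call these functions when a split is actually needed. The recurring idiom, visible in uvm_map_protect/inherit/advice below, sketched:

    /* sketch: map locked; restrict the walk to [start, end) */
    if (uvm_map_lookup_entry(map, start, &entry)) {
        UVM_MAP_CLIP_START(map, entry, start);
    } else {
        entry = entry->next;               /* range starts in a hole */
    }
    while (entry != &map->header && entry->start < end) {
        UVM_MAP_CLIP_END(map, entry, end);
        /* ... apply the operation to this entry ... */
        entry = entry->next;
    }
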
1044 * uvm_map: establish a valid mapping in a map
1050 * => map must be unlocked (we will lock it)
1065 * => XXXCDC: need way to map in external amap?
1069 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1081 * for memory while we have the map locked.
1085 if (map == pager_map) {
1086 new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1090 if (map == pager_map)
1093 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1096 error = uvm_map_enter(map, &args, new_entry);
1103 if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
1104 uvm_km_check_empty(map, *startp, *startp + size);
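
uvm_map() is the public mapping entry point: the caller hands in the map unlocked, *startp as an in/out address hint, and a backing uvm_object (or NULL for anonymous memory); internally it is uvm_map_prepare() (validate, find space, lock the map) followed by uvm_map_enter() (merge or insert, then unlock). A hedged caller sketch; the UVM_MAPFLAG/UVM_PROT_*/UVM_UNKNOWN_OFFSET constants come from uvm_extern.h and are assumptions here, not lines in this listing:

    /* sketch: map "size" (page-aligned) bytes of anonymous memory
     * into kernel_map; constants assumed from uvm_extern.h */
    vaddr_t va = vm_map_min(kernel_map);   /* in/out address hint */
    int error;

    error = uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
        UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
        UVM_ADV_RANDOM, 0));
    if (error == 0) {
        /* va now holds the address of the new mapping */
    }
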
1114 * called with map unlocked.
1115 * on success, returns the map locked.
1119 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1128 UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%jx, flags=%#jx)",
1129 (uintptr_t)map, start, size, flags);
1146 uvm_map_check(map, "map entry");
1162 if (vm_map_lock_try(map) == false) {
1166 vm_map_lock(map); /* could sleep here */
1177 if (uvm_map_lookup_entry(map, start, &prev_entry)) {
1181 UVM_MAP_CLIP_END(map, prev_entry, start);
1182 SAVE_HINT(map, map->hint, prev_entry);
1185 prev_entry = uvm_map_findspace(map, start, size, &start,
1191 timestamp = map->timestamp;
1194 map->flags |= VM_MAP_WANTVA;
1195 vm_map_unlock(map);
1203 mutex_enter(&map->misc_lock);
1204 while ((map->flags & VM_MAP_WANTVA) != 0 &&
1205 map->timestamp == timestamp) {
1207 mutex_exit(&map->misc_lock);
1212 cv_timedwait(&map->cv, &map->misc_lock, hz);
1215 mutex_exit(&map->misc_lock);
1221 * If the kernel pmap can't map the requested space,
1224 if (map == kernel_map && uvm_maxkaddr < (start + size))
1233 * either case we want to zero it before storing it in the map entry
1240 * now (based on the starting address of the map). this case is
1243 * offset is the distance we are from the start of the map.
1269 * called with map locked.
1270 * unlock the map before returning.
1274 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1293 const int kmap = (vm_map_pmap(map) == pmap_kernel());
1299 UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
1300 (uintptr_t)map, start, size, flags);
1304 KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1305 KASSERT(vm_map_locked_p(map));
1328 uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
1333 rv = uvm_map_lookup_entry(map, start, &tmp_entry);
1339 SAVE_HINT(map, map->hint, prev_entry);
1343 * try and insert in map by extending previous entry, if possible.
1352 prev_entry != &map->header &&
1399 uvm_rb_fixup(map, prev_entry);
1401 uvm_map_check(map, "map backmerged");
1409 prev_entry->next != &map->header &&
1517 uvm_map_entry_unlink(map, dead);
1524 if (prev_entry != &map->header) {
1527 uvm_rb_fixup(map, prev_entry);
1533 uvm_map_check(map, "map forwardmerged");
1541 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1553 new_entry = uvm_mapent_alloc(map,
1597 uvm_map_entry_link(map, prev_entry, new_entry);
1603 if ((map->first_free == prev_entry) &&
1605 map->first_free = new_entry;
1610 map->size += size;
1617 vm_map_unlock(map);
1635 * => map must at least be read-locked by caller.
1640 * => If address is below all entries in map, return false and set
1641 * *entry to &map->header.
1645 * not &map->header, address < (*entry)->next->start.
1649 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1652 struct vm_map_entry *prev = &map->header;
1653 struct vm_map_entry *cur = ROOT_ENTRY(map);
1655 KASSERT(rw_lock_held(&map->lock));
1658 KASSERT(prev == &map->header || prev->end <= address);
1659 KASSERT(prev == &map->header || prev->end <= cur->start);
1673 KASSERT(prev == &map->header || prev->end <= address);
1674 KASSERT(prev->next == &map->header || address < prev->next->start);
1680 * uvm_map_lookup_entry: find map entry at or before an address
1682 * => map must at least be read-locked by caller.
1687 * => If address is below all entries in map, return false and set
1688 * *entry to &map->header.
1692 * not &map->header, address < (*entry)->next->start.
1696 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1701 UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
1702 (uintptr_t)map, address, (uintptr_t)entry, 0);
1704 KASSERT(rw_lock_held(&map->lock));
1714 cur = map->hint;
1716 if (cur != &map->header &&
1725 uvm_map_check(map, __func__);
1732 if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
1733 SAVE_HINT(map, map->hint, *entry);
1742 SAVE_HINT(map, map->hint, *entry);
1744 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1745 KDASSERT((*entry)->next == &map->header ||
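
The lookup contract, restated as a caller sketch: with at least a read lock held, a true return means *entry contains the address; a false return means the address is unmapped and *entry is the entry just before the hole (or &map->header if the address precedes every entry):

    /* sketch: resolve "va" under a read lock we take ourselves */
    struct vm_map_entry *entry;

    vm_map_lock_read(map);
    if (uvm_map_lookup_entry(map, va, &entry)) {
        /* entry->start <= va < entry->end */
    } else {
        /* unmapped: entry == &map->header or entry->end <= va,
         * and va < entry->next->start if a next entry exists */
    }
    vm_map_unlock_read(map);
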
1824 * XXX Should this use vm_map_max(map) as the max?
1840 uvm_findspace_invariants(struct vm_map *map, vaddr_t orig_hint, vaddr_t length,
1844 const int topdown = map->flags & VM_MAP_TOPDOWN;
1850 "%s map=%p hint=%#" PRIxVADDR " %s orig_hint=%#" PRIxVADDR
1856 map, hint, topdown ? ">" : "<", orig_hint,
1866 * uvm_map_findspace: find "length" sized space in "map".
1873 * => caller must at least have read-locked map
1874 * => returns NULL on failure, or pointer to prev. map entry if success
1879 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1884 uvm_findspace_invariants(map, orig_hint, length, uobj, uoffset, align,\
1889 const int topdown = map->flags & VM_MAP_TOPDOWN;
1892 UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx...",
1893 (uintptr_t)map, hint, length, flags);
1901 uvm_map_check(map, "map_findspace entry");
1904 * Clamp the hint to the VM map's min/max address, and remember
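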
1915 if (hint < vm_map_min(map)) { /* check ranges ... */
1917 UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1920 hint = vm_map_min(map);
1922 if (hint > vm_map_max(map)) {
1924 hint, vm_map_min(map), vm_map_max(map), 0);
1931 hint, vm_map_min(map), vm_map_max(map), 0);
1943 hint, vm_map_min(map), vm_map_max(map), 0);
1971 hint == (topdown ? vm_map_max(map) : vm_map_min(map))) {
1982 * the VM map's minimum or maximum address, which
2000 entry = map->first_free;
2001 } else if (uvm_map_lookup_entry(map, hint, &entry)) {
2014 KASSERT(entry == &map->header || entry->end <= hint);
2015 KASSERT(entry->next == &map->header ||
2048 if (entry == &map->header)
2064 KASSERTMSG(entry->next->start >= vm_map_min(map),
2065 "map=%p entry=%p entry->next=%p"
2067 map, entry, entry->next,
2068 entry->next->start, vm_map_min(map));
2069 if (length > entry->next->start - vm_map_min(map))
2070 hint = vm_map_min(map); /* XXX goto wraparound? */
2073 KASSERT(hint >= vm_map_min(map));
2091 tmp = ROOT_ENTRY(map);
2238 * Look through the rest of the map, trying to fit a new region in
2264 if (entry == &map->header) {
2272 if (entry == &map->header) {
2281 SAVE_HINT(map, map->hint, entry);
2310 * => map must be locked by caller
2311 * => we return a list of map entries that we've removed from the map
2316 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2322 UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
2323 (uintptr_t)map, start, end, 0);
2324 VM_MAP_RANGE_CHECK(map, start, end);
2326 KASSERT(vm_map_locked_p(map));
2328 uvm_map_check(map, "unmap_remove entry");
2334 if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2337 UVM_MAP_CLIP_START(map, entry, start);
2339 SAVE_HINT(map, entry, entry->prev);
2348 if (map->first_free != &map->header && map->first_free->start >= start)
2349 map->first_free = entry->prev;
2353 * a number of map entries from the map and save them in a linked
2354 * list headed by "first_entry". once we remove them from the map
2355 * the caller should unlock the map and drop the references to the
2358 * [1] the map has to be locked for unmapping
2359 * [2] the map need not be locked for reference dropping
2370 * break up the area into map entry sized regions and unmap. note
2376 while ((entry != &map->header) && (entry->start < end)) {
2379 UVM_MAP_CLIP_END(map, entry, end);
2389 uvm_map_entry_unwire(map, entry);
2395 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2398 * if the map is non-pageable, any pages mapped there
2403 KASSERT(vm_map_pmap(map) == pmap_kernel());
2405 uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2409 __func__, map, entry);
2422 pmap_remove(map->pmap, entry->start, entry->end);
2425 * note: if map is dying, leave pmap_update() for
2426 * later. if the map is to be reused (exec) then
2427 * pmap_update() will be called. if the map is
2432 if ((map->flags & VM_MAP_DYING) == 0) {
2433 pmap_update(vm_map_pmap(map));
2435 KASSERT(vm_map_pmap(map) != pmap_kernel());
2450 if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2456 if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2457 uvm_km_check_empty(map, entry->start, entry->end);
2462 * remove entry from map and put it on our list of entries
2466 UVMHIST_LOG(maphist, " removed map entry %#jx",
2470 SAVE_HINT(map, entry, entry->prev);
2472 uvm_map_entry_unlink(map, entry);
2473 KASSERT(map->size >= len);
2474 map->size -= len;
2481 uvm_map_check(map, "unmap_remove leave");
2484 * now we've cleaned up the map and are ready for the caller to drop
2491 if (map->flags & VM_MAP_WANTVA) {
2492 mutex_enter(&map->misc_lock);
2493 map->flags &= ~VM_MAP_WANTVA;
2494 cv_broadcast(&map->cv);
2495 mutex_exit(&map->misc_lock);
2500 * uvm_unmap_detach: drop references in a chain of map entries
2502 * => we will free the map entries as we traverse the list.
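
Removal is deliberately split into two phases so the reference drops and frees happen without the map lock held: uvm_unmap_remove() (map locked) clips and unlinks the entries onto a dead list, and uvm_unmap_detach() then walks that list dropping references and freeing the entries. uvm_unmap1() further down performs this sequence; condensed (the detach call itself does not match this listing's search term, so its flags argument is an assumption):

    /* sketch: the two-phase teardown performed by uvm_unmap1() */
    struct vm_map_entry *dead_entries;

    vm_map_lock(map);
    uvm_unmap_remove(map, start, end, &dead_entries, 0);
    vm_map_unlock(map);
    if (dead_entries != NULL)
        uvm_unmap_detach(dead_entries, 0);
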
2551 * => we reserve space in a map by putting a dummy map entry in the
2552 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2553 * => map should be unlocked (we will write lock it)
2559 uvm_map_reserve(struct vm_map *map, vsize_t size,
2566 UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
2567 (uintptr_t)map, size, offset, (uintptr_t)raddr);
2575 if (uvm_map(map, raddr, size, NULL, offset, align,
2590 * => caller must WRITE-LOCK the map
2598 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2604 uvm_map_check(map, "map_replace entry");
2607 * first find the blank map entry at the specified address
2610 if (!uvm_map_lookup_entry(map, start, &oldent)) {
2619 UVM_MAP_CLIP_END(map, oldent, end);
2668 * map entry is a valid blank! replace it. (this does all the
2669 * work of map entry link/unlink...).
2675 /* critical: flush stale hints out of map */
2676 SAVE_HINT(map, map->hint, newents);
2677 if (map->first_free == oldent)
2678 map->first_free = last;
2684 uvm_rb_remove(map, oldent);
2688 map->nentries = map->nentries + (nnewents - 1);
2697 uvm_rb_insert(map, tmp);
2703 clear_hints(map, oldent);
2704 uvm_map_entry_unlink(map, oldent);
2706 map->size -= end - start - nsize;
2708 uvm_map_check(map, "map_replace leave");
2719 * uvm_map_extract: extract a mapping from a map and put it somewhere
2734 * be used from within the kernel in a kernel level map <<<
2766 * step 1: reserve space in the target map for the extracted area
2784 * map entry chain, locking src map, and looking up the first useful
2785 * entry in the map.
2802 * the entry may map space "before" the starting
2812 * normal reference: we clip the map to fit (thus
2837 * step 3: now start looping through the map entries, extracting
2866 /* allocate a new map entry */
2873 /* set up new map entry */
2944 * step 5: attempt to lock the dest map so we can pmap_copy.
3033 * step 7: we are done with the source map, unlock. if copy_ok
3082 * uvm_map_submap: punch down part of a map into a submap
3086 * of a larger map
3088 * call [with uobj==NULL] to create a blank map entry in the main map.
3091 * => to remove a submap, use uvm_unmap() on the main map
3093 * => main map must be unlocked.
3099 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3105 vm_map_lock(map);
3106 VM_MAP_RANGE_CHECK(map, start, end);
3108 if (uvm_map_lookup_entry(map, start, &entry)) {
3109 UVM_MAP_CLIP_START(map, entry, start);
3110 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
3127 vm_map_unlock(map);
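
Per the comment above, installing a submap is a two-step protocol: first create a blank entry in the main map by calling uvm_map() with uobj == NULL, then punch the submap into that range. A loose sketch; the flag constants and the final uvm_map_submap() argument (the submap itself) are assumptions from uvm_extern.h/uvm_map.h rather than lines in this listing:

    /* sketch: reserve [va, va + len) in main_map, then install submap */
    vaddr_t va = sub_start;                /* assumed fixed address */

    if (uvm_map(main_map, &va, len, NULL, UVM_UNKNOWN_OFFSET, 0,
        UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
        UVM_ADV_RANDOM, UVM_FLAG_FIXED)) == 0)
        error = uvm_map_submap(main_map, va, va + len, submap);
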
3133 * uvm_map_protect_user: change map protection on behalf of the user.
3151 * uvm_map_protect: change map protection
3154 * => map must be unlocked.
3161 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3167 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
3168 (uintptr_t)map, start, end, new_prot);
3170 vm_map_lock(map);
3171 VM_MAP_RANGE_CHECK(map, start, end);
3172 if (uvm_map_lookup_entry(map, start, &entry)) {
3173 UVM_MAP_CLIP_START(map, entry, start);
3183 while ((current != &map->header) && (current->start < end)) {
3215 while ((current != &map->header) && (current->start < end)) {
3218 UVM_MAP_CLIP_END(map, current, end);
3227 * update physical map if necessary. worry about copy-on-write
3238 pmap_protect(map->pmap, current->start, current->end,
3259 * If the map is configured to lock any future mappings,
3264 if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3277 pmap_update(map->pmap);
3279 if (uvm_map_pageable(map, current->start,
3287 * the map, but will return the error
3302 pmap_update(map->pmap);
3305 vm_map_unlock(map);
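
Note the two passes above: the first loop validates the request against each entry, the second clips and applies it, calling pmap_protect() (and possibly uvm_map_pageable() under VM_MAP_WIREFUTURE) only where the effective protection actually changes. A minimal caller sketch; the final set_max argument is an assumption from uvm_map.h, not shown in this listing:

    /* sketch: make [start, end) read-only in an unlocked map */
    error = uvm_map_protect(map, start, end, VM_PROT_READ,
        false /* set_max: assumed */);
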
3314 * uvm_map_inherit: set inheritance code for range of addrs in map.
3316 * => map must be unlocked
3322 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3327 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
3328 (uintptr_t)map, start, end, new_inheritance);
3341 vm_map_lock(map);
3342 VM_MAP_RANGE_CHECK(map, start, end);
3343 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3345 UVM_MAP_CLIP_START(map, entry, start);
3349 while ((entry != &map->header) && (entry->start < end)) {
3350 UVM_MAP_CLIP_END(map, entry, end);
3354 vm_map_unlock(map);
3360 * uvm_map_advice: set advice code for range of addrs in map.
3362 * => map must be unlocked
3366 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3370 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
3371 (uintptr_t)map, start, end, new_advice);
3373 vm_map_lock(map);
3374 VM_MAP_RANGE_CHECK(map, start, end);
3375 if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3377 UVM_MAP_CLIP_START(map, entry, start);
3386 while ((entry != &map->header) && (entry->start < end)) {
3387 UVM_MAP_CLIP_END(map, entry, end);
3397 vm_map_unlock(map);
3405 vm_map_unlock(map);
3415 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3419 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
3420 (uintptr_t)map, start, end, 0);
3422 vm_map_lock_read(map);
3423 VM_MAP_RANGE_CHECK(map, start, end);
3424 if (!uvm_map_lookup_entry(map, start, &entry)) {
3431 KASSERT(entry != &map->header);
3457 vm_map_unlock_read(map);
3463 * uvm_map_pageable: sets the pageability of a range in a map.
3465 * => wires map entries. should not be used for transient page locking.
3469 * => map must never be read-locked
3470 * => if islocked is true, map is already write-locked
3471 * => we always unlock the map, since we must downgrade to a read-lock
3477 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3486 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
3487 (uintptr_t)map, start, end, new_pageable);
3488 KASSERT(map->flags & VM_MAP_PAGEABLE);
3491 vm_map_lock(map);
3492 VM_MAP_RANGE_CHECK(map, start, end);
3502 if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3504 vm_map_unlock(map);
3513 vm_map_unlock(map);
3524 UVM_MAP_CLIP_START(map, entry, start);
3531 while ((entry != &map->header) && (entry->start < end)) {
3534 (entry->next == &map->header ||
3537 vm_map_unlock(map);
3551 while ((entry != &map->header) && (entry->start < end)) {
3552 UVM_MAP_CLIP_END(map, entry, end);
3554 uvm_map_entry_unwire(map, entry);
3558 vm_map_unlock(map);
3567 * to be created. then we clip each map entry to the region to
3576 * us, then in turn block on the map lock that we hold). because
3580 * keep the read lock on the map, the copy-on-write status of the
3584 while ((entry != &map->header) && (entry->start < end)) {
3589 * write lock on the map: create an anonymous map
3590 * for a copy-on-write region, or an anonymous map
3599 amap_copy(map, entry, 0, start, end);
3604 UVM_MAP_CLIP_START(map, entry, start);
3605 UVM_MAP_CLIP_END(map, entry, end);
3614 (entry->next == &map->header ||
3622 while (entry != &map->header && entry->end > start) {
3627 vm_map_unlock(map);
3639 timestamp_save = map->timestamp;
3641 vm_map_busy(map);
3642 vm_map_unlock(map);
3646 while (entry != &map->header && entry->start < end) {
3648 rv = uvm_fault_wire(map, entry->start, entry->end,
3654 * we'll clean up the map below, once we
3670 vm_map_lock(map);
3671 vm_map_unbusy(map);
3674 if (timestamp_save + 1 != map->timestamp)
3675 panic("uvm_map_pageable: stale map");
3684 while (entry != &map->header && entry->start < end) {
3698 uvm_map_entry_unwire(map, entry);
3702 vm_map_unlock(map);
3708 vm_map_unbusy(map);
3715 vm_map_lock(map);
3716 vm_map_unbusy(map);
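
The wiring pass cannot keep the map's rw lock across the faults, so the function records map->timestamp, marks the map busy, and drops the lock; after re-locking and un-busying it treats any timestamp advance beyond its own re-lock as a stale map. Condensed from the fragments above (the uvm_fault_wire() argument list is truncated in this listing and left out here):

    /* sketch: wire the range without holding the rw lock across faults */
    timestamp_save = map->timestamp;
    vm_map_busy(map);
    vm_map_unlock(map);

    /* ... uvm_fault_wire() each entry covering [start, end) ... */

    vm_map_lock(map);
    vm_map_unbusy(map);
    if (timestamp_save + 1 != map->timestamp)
        panic("uvm_map_pageable: stale map");
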
3727 * => map must not be locked.
3733 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3742 UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
3745 KASSERT(map->flags & VM_MAP_PAGEABLE);
3747 vm_map_lock(map);
3760 for (entry = map->header.next; entry != &map->header;
3763 uvm_map_entry_unwire(map, entry);
3765 map->flags &= ~VM_MAP_WIREFUTURE;
3766 vm_map_unlock(map);
3777 map->flags |= VM_MAP_WIREFUTURE;
3787 vm_map_unlock(map);
3806 * us, then in turn block on the map lock that we hold). because
3810 * keep the read lock on the map, the copy-on-write status of the
3814 for (size = 0, entry = map->header.next; entry != &map->header;
3823 vm_map_unlock(map);
3828 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3829 vm_map_unlock(map);
3837 for (entry = map->header.next; entry != &map->header;
3845 * write lock on the map: create an anonymous map
3846 * for a copy-on-write region, or an anonymous map
3855 amap_copy(map, entry, 0, entry->start,
3869 timestamp_save = map->timestamp;
3871 vm_map_busy(map);
3872 vm_map_unlock(map);
3875 for (entry = map->header.next; entry != &map->header;
3878 rv = uvm_fault_wire(map, entry->start, entry->end,
3884 * we'll clean up the map below, once we
3899 vm_map_lock(map);
3900 vm_map_unbusy(map);
3903 if (timestamp_save + 1 != map->timestamp)
3904 panic("uvm_map_pageable_all: stale map");
3915 for (/* nothing */; entry != &map->header;
3929 for (entry = map->header.next; entry != failed_entry;
3935 uvm_map_entry_unwire(map, entry);
3937 vm_map_unlock(map);
3942 vm_map_unbusy(map);
3949 * uvm_map_clean: clean out a map range
3960 * => caller must not have map locked
3964 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3976 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
3977 (uintptr_t)map, start, end, flags);
3982 vm_map_lock(map);
3983 VM_MAP_RANGE_CHECK(map, start, end);
3984 if (!uvm_map_lookup_entry(map, start, &entry)) {
3985 vm_map_unlock(map);
3995 vm_map_unlock(map);
3999 vm_map_unlock(map);
4006 vm_map_unlock(map);
4011 vm_map_busy(map);
4012 vm_map_unlock(map);
4118 vm_map_unbusy(map);
4124 * uvm_map_checkprot: check protection in map
4127 * => map must be read or write locked by caller.
4131 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4137 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4142 if (entry == &map->header) {
4291 struct vm_map *map;
4299 map = &ovm->vm_map;
4325 map->flags &= ~VM_MAP_WIREFUTURE;
4340 flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4341 map->flags |= VM_MAP_DYING;
4342 uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
4343 map->flags &= ~VM_MAP_DYING;
4344 pmap_update(map->pmap);
4345 KASSERT(map->header.prev == &map->header);
4346 KASSERT(map->nentries == 0);
4349 * resize the map
4352 vm_map_setmin(map, start);
4353 vm_map_setmax(map, end);
4399 struct vm_map *map = &vm->vm_map;
4412 * at this point, there should be no other references to the map.
4416 map->flags |= VM_MAP_DYING;
4417 flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4423 if (map->nentries) {
4424 vm_map_lock(map);
4425 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4427 vm_map_unlock(map);
4431 KASSERT(map->nentries == 0);
4432 KASSERT(map->size == 0);
4434 mutex_destroy(&map->misc_lock);
4435 rw_destroy(&map->lock);
4436 cv_destroy(&map->cv);
4437 pmap_destroy(map->pmap);
4455 * gain reference to object backing the map (can't
4643 * uvmspace_fork: fork a process' main map
4646 * => parent's map must not be locked.
4727 * => called with map locked.
4732 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4755 if (next != &map->header &&
4781 clear_hints(map, next);
4782 uvm_map_entry_unlink(map, next);
4787 uvm_map_check(map, "trymerge forwardmerge");
4794 if (prev != &map->header &&
4822 clear_hints(map, prev);
4823 uvm_map_entry_unlink(map, prev);
4828 uvm_map_check(map, "trymerge backmerge");
4838 * uvm_map_setup: init map
4840 * => map must not be in service yet.
4844 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4847 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4848 map->header.next = map->header.prev = &map->header;
4849 map->nentries = 0;
4850 map->size = 0;
4851 map->ref_count = 1;
4852 vm_map_setmin(map, vmin);
4853 vm_map_setmax(map, vmax);
4854 map->flags = flags;
4855 map->first_free = &map->header;
4856 map->hint = &map->header;
4857 map->timestamp = 0;
4858 map->busy = NULL;
4860 rw_init(&map->lock);
4861 cv_init(&map->cv, "vm_map");
4862 mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4873 * => map must be unlocked (we will lock it)
4878 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4882 UVMHIST_CALLARGS(maphist, " (map=%#jx, start=%#jx, end=%#jx)",
4883 (uintptr_t)map, start, end, 0);
4886 "%s: map %p: start %#jx < end %#jx", __func__, map,
4888 if (map == kernel_map) {
4896 vm_map_lock(map);
4897 uvm_unmap_remove(map, start, end, &dead_entries, flags);
4898 vm_map_unlock(map);
4908 * uvm_map_reference: add reference to a map
4910 * => map need not be locked
4914 uvm_map_reference(struct vm_map *map)
4917 atomic_inc_uint(&map->ref_count);
4978 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
4988 UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
4995 vm_map_lock(map);
4998 vm_map_lock_read(map);
5002 if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
5003 unlock_fn(map);
5009 unlock_fn(map);
5016 * needed" whereby we have taken a read lock on the map and if
5025 * to be created. however, we do NOT need to clip the map entries
5038 * write lock on the map: create an anonymous map for
5039 * a copy-on-write region, or an anonymous map for
5043 unlock_fn(map);
5053 vm_map_unlock_read(map);
5058 amap_copy(map, entry, 0, start, end);
5087 vm_map_busy(map);
5088 vm_map_unlock(map);
5092 if (uvm_fault_wire(map, start, end,
5095 unlock_fn(map);
5106 vm_map_lock(map);
5107 vm_map_unbusy(map);
5111 uvm_fault_unwire_locked(map, start, end);
5142 unlock_fn(map);
5236 * uvm_map_printit: actually prints the map
5240 uvm_map_printit(struct vm_map *map, bool full,
5245 (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
5246 vm_map_max(map));
5248 map->nentries, map->size, map->ref_count, map->timestamp,
5249 map->flags);
5250 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5251 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5254 for (entry = map->header.next; entry != &map->header;
5268 entry == map->first_free ? " (first_free)" : "");
5275 struct vm_map *map;
5277 for (map = kernel_map;;) {
5280 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5285 (size_t)(addr - (uintptr_t)entry->start), map);
5289 map = entry->object.sub_map;
5360 struct vm_map *map = e->object.sub_map;
5361 KASSERT(map != NULL);
5362 kve->kve_ref_count = map->ref_count;
5363 kve->kve_count = map->nentries;
5400 struct vm_map *map;
5427 map = &vm->vm_map;
5428 vm_map_lock_read(map);
5433 for (entry = map->header.next; entry != &map->header;
5436 error = fill_vmentry(l, p, &vme[count], map, entry);
5443 vm_map_unlock_read(map);