Lines Matching +defs:prev +defs:entry

196 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
212 * uvm_map_entry_link: insert entry into a map
216 #define uvm_map_entry_link(map, after_where, entry) do { \
217 uvm_mapent_check(entry); \
219 (entry)->prev = (after_where); \
220 (entry)->next = (after_where)->next; \
221 (entry)->prev->next = (entry); \
222 (entry)->next->prev = (entry); \
223 uvm_rb_insert((map), (entry)); \
227 * uvm_map_entry_unlink: remove entry from a map
231 #define uvm_map_entry_unlink(map, entry) do { \
232 KASSERT((entry) != (map)->first_free); \
233 KASSERT((entry) != (map)->hint); \
234 uvm_mapent_check(entry); \
236 (entry)->next->prev = (entry)->prev; \
237 (entry)->prev->next = (entry)->next; \
238 uvm_rb_remove((map), (entry)); \
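
The uvm_map_entry_link/unlink macros above keep the map's circular doubly-linked entry list (with &map->header as the sentinel) in step with the red-black tree. A minimal user-space sketch of just the list discipline, with illustrative demo_* names and none of the kernel's checks or tree maintenance:

#include <assert.h>

struct demo_entry {
        struct demo_entry *prev, *next;
};

/* Insert "entry" immediately after "after_where", as the link macro does. */
static void
demo_link(struct demo_entry *after_where, struct demo_entry *entry)
{
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->prev->next = entry;
        entry->next->prev = entry;
}

/* Remove "entry" from whatever list it is on, as the unlink macro does. */
static void
demo_unlink(struct demo_entry *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

int
main(void)
{
        struct demo_entry header = { &header, &header };  /* empty list */
        struct demo_entry a, b;

        demo_link(&header, &a);         /* header <-> a */
        demo_link(&a, &b);              /* header <-> a <-> b */
        assert(header.next == &a && a.next == &b && b.next == &header);

        demo_unlink(&a);                /* header <-> b */
        assert(header.next == &b && b.prev == &header);
        return 0;
}
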
242 * SAVE_HINT: saves the specified entry as the hint for future lookups.
252 * clear_hints: ensure that hints don't point to the entry.
260 SAVE_HINT(map, ent, ent->prev);
262 map->first_free = ent->prev;
309 * child entry in question exists:
311 * LEFT_ENTRY(entry)->end <= entry->start
312 * entry->end <= RIGHT_ENTRY(entry)->start
317 #define LEFT_ENTRY(entry) \
318 ((struct vm_map_entry *)(entry)->rb_node.rb_left)
319 #define RIGHT_ENTRY(entry) \
320 ((struct vm_map_entry *)(entry)->rb_node.rb_right)
321 #define PARENT_ENTRY(map, entry) \
322 (ROOT_ENTRY(map) == (entry) \
323 ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
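
LEFT_ENTRY, RIGHT_ENTRY and PARENT_ENTRY expose the tree structure so the gap code below can walk it; the invariant quoted above says the tree is ordered by address and the pieces never overlap. A sketch of that invariant as a recursive checker, on a simplified node type rather than the real rb_node layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_node {
        uintptr_t start, end;           /* [start, end) of the mapping */
        struct demo_node *left, *right;
};

/* Return true if every node's children respect the address ordering. */
static bool
demo_check_order(const struct demo_node *n)
{
        if (n == NULL)
                return true;
        if (n->left != NULL && n->left->end > n->start)
                return false;           /* left child must end at or before us */
        if (n->right != NULL && n->end > n->right->start)
                return false;           /* we must end at or before the right child */
        return demo_check_order(n->left) && demo_check_order(n->right);
}
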
371 * uvm_rb_gap: return the gap size between our entry and next entry.
374 uvm_rb_gap(const struct vm_map_entry *entry)
377 KASSERT(entry->next != NULL);
378 return entry->next->start - entry->end;
382 uvm_rb_maxgap(const struct vm_map_entry *entry)
385 vsize_t maxgap = entry->gap;
394 if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
397 if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
404 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
408 KASSERT(entry->gap == uvm_rb_gap(entry));
409 entry->maxgap = uvm_rb_maxgap(entry);
411 while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
417 if (maxgap < entry->maxgap)
418 maxgap = entry->maxgap;
420 * Since we work towards the root, we know entry's maxgap
424 which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
434 entry = parent;
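
Taken together, uvm_rb_gap, uvm_rb_maxgap and uvm_rb_fixup maintain the free-space annotation: gap caches the hole between an entry and its successor, maxgap the largest hole anywhere in the entry's subtree, and fixup pushes a recomputed maxgap toward the root after a change. A user-space sketch with illustrative types (a plain parent pointer stands in for RB_FATHER, and the real fixup is more careful about when it can stop walking):

#include <stddef.h>
#include <stdint.h>

struct demo_gnode {
        uintptr_t gap;          /* successor's start minus our end, cached */
        uintptr_t maxgap;       /* largest gap in this subtree */
        struct demo_gnode *left, *right, *parent;
};

/* Recompute one node's maxgap from its own gap and its children's maxgaps. */
static uintptr_t
demo_maxgap(const struct demo_gnode *n)
{
        uintptr_t maxgap = n->gap;

        if (n->left != NULL && n->left->maxgap > maxgap)
                maxgap = n->left->maxgap;
        if (n->right != NULL && n->right->maxgap > maxgap)
                maxgap = n->right->maxgap;
        return maxgap;
}

/* Propagate a changed gap from "n" up to the root. */
static void
demo_fixup(struct demo_gnode *n)
{
        for (; n != NULL; n = n->parent)
                n->maxgap = demo_maxgap(n);
}
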
439 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
443 entry->gap = entry->maxgap = uvm_rb_gap(entry);
444 if (entry->prev != &map->header)
445 entry->prev->gap = uvm_rb_gap(entry->prev);
447 ret = rb_tree_insert_node(&map->rb_tree, entry);
448 KASSERTMSG(ret == entry,
449 "uvm_rb_insert: map %p: duplicate entry %p", map, ret);
452 * If the previous entry is not our immediate left child, then it's an
454 * have to check entry->prev against &map->header since &map->header
458 LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
462 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
472 if (entry->prev != &map->header)
473 prev_parent = PARENT_ENTRY(map, entry->prev);
474 if (entry->next != &map->header)
475 next_parent = PARENT_ENTRY(map, entry->next);
477 rb_tree_remove_node(&map->rb_tree, entry);
483 if (entry->prev != &map->header) {
485 * Update the previous entry's gap due to our absence.
487 entry->prev->gap = uvm_rb_gap(entry->prev);
488 uvm_rb_fixup(map, entry->prev);
490 && prev_parent != entry
491 && prev_parent != PARENT_ENTRY(map, entry->prev))
499 if (entry->next != &map->header) {
500 uvm_rb_fixup(map, entry->next);
502 && next_parent != entry
503 && next_parent != PARENT_ENTRY(map, entry->next))
618 if (tmp->prev != trtmp) {
619 printf("lookup: %d: %p->prev=%p: %p\n",
620 i, tmp, tmp->prev, trtmp);
783 * uvm_mapent_alloc: allocate a map entry
799 UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
805 * uvm_mapent_free: free map entry
812 UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
818 * uvm_mapent_copy: copy a map entry, preserving flags
831 _uvm_mapent_check(const struct vm_map_entry *entry, int line)
834 if (entry->start >= entry->end) {
837 if (UVM_ET_ISOBJ(entry)) {
838 if (entry->object.uvm_obj == NULL) {
841 } else if (UVM_ET_ISSUBMAP(entry)) {
842 if (entry->object.sub_map == NULL) {
846 if (entry->object.uvm_obj != NULL ||
847 entry->object.sub_map != NULL) {
851 if (!UVM_ET_ISOBJ(entry)) {
852 if (entry->offset != 0) {
860 panic("%s: bad entry %p, line %d", __func__, entry, line);
865 * uvm_map_entry_unwire: unwire a map entry
871 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
874 entry->wired_count = 0;
875 uvm_fault_unwire_locked(map, entry->start, entry->end);
883 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
886 amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
887 (entry->end - entry->start) >> PAGE_SHIFT, flags);
895 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
898 amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
899 (entry->end - entry->start) >> PAGE_SHIFT, flags);
921 * initialize the global lock for kernel map entry.
979 * uvm_map_clip_start: ensure that the entry begins at or after
980 * the starting address; if it doesn't, we split the entry.
988 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
993 /* uvm_map_simplify_entry(map, entry); */ /* XXX */
995 uvm_map_check(map, "clip_start entry");
996 uvm_mapent_check(entry);
1000 * entry BEFORE this one, so that this entry has the specified
1004 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1005 uvm_mapent_splitadj(new_entry, entry, start);
1006 uvm_map_entry_link(map, entry->prev, new_entry);
1012 * uvm_map_clip_end: ensure that the entry ends at or before
1021 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1025 uvm_map_check(map, "clip_end entry");
1026 uvm_mapent_check(entry);
1029 * Create a new entry and insert it
1030 * AFTER the specified entry
1033 uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1034 uvm_mapent_splitadj(entry, new_entry, end);
1035 uvm_map_entry_link(map, entry, new_entry);
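
uvm_map_clip_start and uvm_map_clip_end both split one entry into two adjoining entries at a boundary address; clip_start links the new piece before the original, clip_end after it. A sketch of the address arithmetic alone, on an illustrative range type (the real code duplicates the entry with uvm_mapent_copy and lets uvm_mapent_splitadj divide it):

#include <assert.h>
#include <stdint.h>

struct demo_range {
        uintptr_t start, end;           /* [start, end) */
};

/*
 * Split *r at "at": *r keeps [at, end) and the returned piece covers
 * [start, at), i.e. what clip_start would link in front of the
 * original entry.  clip_end is the mirror image: the original keeps
 * the low half and the new entry takes [at, end).
 */
static struct demo_range
demo_clip_start(struct demo_range *r, uintptr_t at)
{
        struct demo_range before;

        assert(r->start < at && at < r->end);
        before.start = r->start;
        before.end = at;
        r->start = at;
        return before;
}
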
1080 * for pager_map, allocate the new entry first to avoid sleeping
1146 uvm_map_check(map, "map entry");
1179 prev_entry = prev_entry->prev;
1233 * either case we want to zero it before storing it in the map entry
1322 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
1343 * try and insert in map by extending previous entry, if possible.
1344 * XXX: we don't try and pull back the next entry. might be useful
1422 * merged with the previous entry which has an amap,
1423 * and the next entry also has an amap, we give up.
1433 * backed by the same amap (ie, arefs is 2, "prev" and
1447 * Try to extend the amap of the previous entry to
1448 * cover the next entry as well. If it doesn't work
1461 * Try to extend the amap of the *next* entry
1463 * previous entry as well (the previous merge
1479 * Pull the next entry's amap backwards to cover this
1541 UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1549 * allocate new entry and link it in.
1633 * uvm_map_lookup_entry_bytree: lookup an entry in tree
1637 * => If address lies in an entry, set *entry to it and return true;
1638 * then (*entry)->start <= address < (*entry)->end.
1641 * *entry to &map->header.
1643 * => Otherwise, return false and set *entry to the highest entry below
1644 * address, so (*entry)->end <= address, and if (*entry)->next is
1645 * not &map->header, address < (*entry)->next->start.
1650 struct vm_map_entry **entry /* OUT */)
1652 struct vm_map_entry *prev = &map->header;
1658 KASSERT(prev == &map->header || prev->end <= address);
1659 KASSERT(prev == &map->header || prev->end <= cur->start);
1663 *entry = cur;
1666 prev = cur;
1667 KASSERT(prev->end <= address);
1669 KASSERT(cur == NULL || prev->end <= cur->start);
1673 KASSERT(prev == &map->header || prev->end <= address);
1674 KASSERT(prev->next == &map->header || address < prev->next->start);
1675 *entry = prev;
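
The contract spelled out above, return true with *entry pointing at the containing entry, otherwise false with *entry left at the highest entry ending at or below the address, is a predecessor search over the address-ordered tree. A self-contained sketch on a simplified node type, with NULL standing in for &map->header:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_lnode {
        uintptr_t start, end;           /* [start, end) */
        struct demo_lnode *left, *right;
};

static bool
demo_lookup(struct demo_lnode *root, uintptr_t addr, struct demo_lnode **out)
{
        struct demo_lnode *prev = NULL; /* best node entirely below addr so far */
        struct demo_lnode *cur = root;

        while (cur != NULL) {
                if (addr >= cur->start && addr < cur->end) {
                        *out = cur;             /* address lies inside cur */
                        return true;
                }
                if (addr >= cur->end) {
                        prev = cur;             /* cur ends at or below addr; look right */
                        cur = cur->right;
                } else {
                        cur = cur->left;        /* addr is before cur; look left */
                }
        }
        *out = prev;
        return false;
}
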
1680 * uvm_map_lookup_entry: find map entry at or before an address
1684 * => If address lies in an entry, set *entry to it and return true;
1685 * then (*entry)->start <= address < (*entry)->end.
1688 * *entry to &map->header.
1690 * => Otherwise, return false and set *entry to the highest entry below
1691 * address, so (*entry)->end <= address, and if (*entry)->next is
1692 * not &map->header, address < (*entry)->next->start.
1697 struct vm_map_entry **entry /* OUT */)
1702 (uintptr_t)map, address, (uintptr_t)entry, 0);
1708 * the entry we want (which is usually the case). note also
1719 *entry = cur;
1722 uvm_mapent_check(*entry);
1732 if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
1733 SAVE_HINT(map, map->hint, *entry);
1736 KDASSERT((*entry)->start <= address);
1737 KDASSERT(address < (*entry)->end);
1738 uvm_mapent_check(*entry);
1742 SAVE_HINT(map, map->hint, *entry);
1744 KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1745 KDASSERT((*entry)->next == &map->header ||
1746 address < (*entry)->next->start);
1752 * entry->next->start and entry->end. Returns 1 if fits, 0 if doesn't
1757 vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
1768 " flags=%x entry@%p=[%"PRIxVADDR",%"PRIxVADDR")" \
1772 flags, entry, entry->start, entry->end, \
1822 * proposed new region fits before the next entry, we win.
1831 if (entry->next->start >= end && *start >= entry->end)
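
The test just above is the core of uvm_map_space_avail: the candidate region must begin at or after the current entry's end and finish at or before the next entry's start. A sketch of only that fit test (the real function first applies alignment and other placement policy before reaching this comparison):

#include <stdbool.h>
#include <stdint.h>

/*
 * Does [start, start + length) fit in the gap [gap_lo, gap_hi)?
 * gap_lo plays the role of entry->end, gap_hi of entry->next->start.
 */
static bool
demo_fits_in_gap(uintptr_t gap_lo, uintptr_t gap_hi,
    uintptr_t start, uintptr_t length)
{
        uintptr_t end = start + length;

        if (end < start)
                return false;           /* start + length wrapped around */
        return start >= gap_lo && end <= gap_hi;
}
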
1842 vaddr_t hint, struct vm_map_entry *entry, int line)
1852 " flags=%#x entry@%p=[%" PRIxVADDR ",%" PRIxVADDR ")"
1853 " entry->next@%p=[%" PRIxVADDR ",%" PRIxVADDR ")"
1858 flags, entry, entry ? entry->start : 0, entry ? entry->end : 0,
1859 entry ? entry->next : NULL,
1860 entry && entry->next ? entry->next->start : 0,
1861 entry && entry->next ? entry->next->end : 0,
1874 * => returns NULL on failure, or pointer to prev. map entry if success
1885 flags, hint, entry, __LINE__)
1886 struct vm_map_entry *entry = NULL;
1887 struct vm_map_entry *child, *prev, *tmp;
1901 uvm_map_check(map, "map_findspace entry");
1954 * 2: found, not fixed, bottom up -> start after entry->end,
1956 * 3: found, not fixed, top down -> start before entry->start,
1958 * 4: not found, fixed, bottom up -> check entry->next->start, fail
1959 * 5: not found, fixed, top down -> check entry->next->start, fail
1960 * 6: not found, not fixed, bottom up -> check entry->next->start,
1962 * 7: not found, not fixed, top down -> check entry->next->start,
2000 entry = map->first_free;
2001 } else if (uvm_map_lookup_entry(map, hint, &entry)) {
2002 KASSERT(entry->start <= hint);
2003 KASSERT(hint < entry->end);
2012 entry = entry->prev;
2014 KASSERT(entry == &map->header || entry->end <= hint);
2015 KASSERT(entry->next == &map->header ||
2016 hint < entry->next->start);
2018 if (entry->next->start >= hint &&
2019 length <= entry->next->start - hint)
2031 uoffset, align, flags, topdown, entry);
2043 * if hint > entry->end.
2047 entry = entry->next;
2048 if (entry == &map->header)
2062 /* Check slot before any entry */
2064 KASSERTMSG(entry->next->start >= vm_map_min(map),
2065 "map=%p entry=%p entry->next=%p"
2066 " entry->next->start=0x%"PRIxVADDR" min=0x%"PRIxVADDR,
2067 map, entry, entry->next,
2068 entry->next->start, vm_map_min(map));
2069 if (length > entry->next->start - vm_map_min(map))
2072 hint = MIN(orig_hint, entry->next->start - length);
2075 hint = entry->end;
2079 topdown, entry);
2095 prev = NULL; /* previous candidate */
2097 /* Find an entry close to hint that has enough space */
2102 (prev == NULL || tmp->end > prev->end)) {
2104 prev = tmp;
2107 prev = tmp;
2111 (prev == NULL || tmp->end < prev->end)) {
2113 prev = tmp;
2116 prev = tmp;
2138 * Check if the entry that we found satisfies the
2154 entry = tmp;
2162 if (prev == NULL)
2166 KASSERT(orig_hint >= prev->next->start - length ||
2167 prev->next->start - length > prev->next->start);
2168 hint = prev->next->start - length;
2170 KASSERT(orig_hint <= prev->end);
2171 hint = prev->end;
2175 flags, topdown, prev);
2179 entry = prev;
2184 if (prev->gap >= length)
2188 tmp = LEFT_ENTRY(prev);
2190 tmp = RIGHT_ENTRY(prev);
2226 entry = tmp;
2233 * The tree fails to find an entry because of offset or alignment
2240 * note: entry->end = base VA of current gap,
2241 * entry->next->start = VA of end of current gap
2247 hint = topdown ? MIN(orig_hint, entry->next->start - length)
2248 : entry->end;
2253 flags, topdown, entry);
2264 if (entry == &map->header) {
2269 entry = entry->prev;
2271 entry = entry->next;
2272 if (entry == &map->header) {
2281 SAVE_HINT(map, map->hint, entry);
2285 KASSERT(entry->end <= hint);
2286 KASSERT(hint <= entry->next->start);
2287 KASSERT(length <= entry->next->start - hint);
2288 return (entry);
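
uvm_map_findspace's tree phase relies on the maxgap annotation maintained above: a subtree is worth descending into only if its maxgap can hold the requested length, and an entry is a candidate when its own gap is large enough. A simplified recursive sketch of that pruning idea, ignoring the hint, alignment and top-down versus bottom-up policy the real search also has to honour:

#include <stddef.h>
#include <stdint.h>

struct demo_fnode {
        uintptr_t gap;          /* free space after this entry */
        uintptr_t maxgap;       /* largest gap in this subtree */
        struct demo_fnode *left, *right;
};

/* Return any node whose trailing gap can hold "length", or NULL. */
static struct demo_fnode *
demo_findspace(struct demo_fnode *n, uintptr_t length)
{
        if (n == NULL || n->maxgap < length)
                return NULL;            /* nothing big enough below here */
        if (n->gap >= length)
                return n;               /* this node's own gap fits */
        if (n->left != NULL && n->left->maxgap >= length)
                return demo_findspace(n->left, length);
        return demo_findspace(n->right, length);
}
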
2319 struct vm_map_entry *entry, *first_entry, *next;
2328 uvm_map_check(map, "unmap_remove entry");
2331 * find first entry
2336 entry = first_entry;
2337 UVM_MAP_CLIP_START(map, entry, start);
2339 SAVE_HINT(map, entry, entry->prev);
2341 entry = first_entry->next;
2349 map->first_free = entry->prev;
2370 * break up the area into map entry sized regions and unmap. note
2376 while ((entry != &map->header) && (entry->start < end)) {
2377 KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2379 UVM_MAP_CLIP_END(map, entry, end);
2380 next = entry->next;
2381 len = entry->end - entry->start;
2388 if (VM_MAPENT_ISWIRED(entry)) {
2389 uvm_map_entry_unwire(map, entry);
2405 uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2406 } else if (UVM_ET_ISOBJ(entry) &&
2407 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2409 __func__, map, entry);
2410 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2418 uvm_map_lock_entry(entry, RW_WRITER);
2420 uvm_map_lock_entry(entry, RW_READER);
2422 pmap_remove(map->pmap, entry->start, entry->end);
2438 uvm_map_unlock_entry(entry);
2448 for (va = entry->start; va < entry->end;
2457 uvm_km_check_empty(map, entry->start, entry->end);
2462 * remove entry from map and put it on our list of entries
2463 * that we've nuked. then go to next entry.
2466 UVMHIST_LOG(maphist, " removed map entry %#jx",
2467 (uintptr_t)entry, 0, 0, 0);
2470 SAVE_HINT(map, entry, entry->prev);
2472 uvm_map_entry_unlink(map, entry);
2475 entry->prev = NULL;
2476 entry->next = first_entry;
2477 first_entry = entry;
2478 entry = next;
2551 * => we reserve space in a map by putting a dummy map entry in the
2593 * we expect newents->prev to point to the last entry on the list
2604 uvm_map_check(map, "map_replace entry");
2607 * first find the blank map entry at the specified address
2615 * check to make sure we have a proper blank entry
2652 if (tmpent->next->prev != tmpent)
2655 if (newents->prev != tmpent)
2668 * map entry is a valid blank! replace it. (this does all the
2669 * work of map entry link/unlink...).
2673 last = newents->prev;
2681 last->next->prev = last;
2686 newents->prev = oldent->prev;
2687 newents->prev->next = newents;
2711 * now we can free the old blank entry and return.
2742 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2744 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2784 * map entry chain, locking src map, and looking up the first useful
2785 * entry in the map.
2795 if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2797 /* "start" is within an entry */
2801 * for quick references we don't clip the entry, so
2802 * the entry may map space "before" the starting
2808 fudge = start - entry->start;
2816 UVM_MAP_CLIP_START(srcmap, entry, start);
2817 SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2822 /* "start" is not within an entry ... skip to next entry */
2828 entry = entry->next;
2833 orig_entry = entry;
2841 while (entry->start < end && entry != &srcmap->header) {
2845 UVM_MAP_CLIP_END(srcmap, entry, end);
2848 if (UVM_ET_ISNEEDSCOPY(entry)) {
2849 amap_copy(srcmap, entry,
2851 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2858 fudge = start - entry->start;
2864 oldoffset = (entry->start + fudge) - start;
2866 /* allocate a new map entry */
2873 /* set up new map entry */
2875 newentry->prev = endchain;
2878 newentry->start + (entry->end - (entry->start + fudge));
2881 newentry->object.uvm_obj = entry->object.uvm_obj;
2886 newentry->offset = entry->offset + fudge;
2890 newentry->etype = entry->etype;
2896 entry->max_protection : entry->protection;
2897 newentry->max_protection = entry->max_protection;
2899 newentry->inheritance = entry->inheritance;
2901 newentry->aref.ar_amap = entry->aref.ar_amap;
2904 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2910 newentry->advice = entry->advice;
2926 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2927 (entry->next == &srcmap->header ||
2928 entry->next->start != entry->end)) {
2932 entry = entry->next;
2941 chain->prev = endchain;
2975 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2978 srcmap->first_free = orig_entry->prev;
2981 entry = orig_entry;
2985 while (entry->start < end && entry != &srcmap->header) {
2987 oldoffset = (entry->start + fudge) - start;
2988 elen = MIN(end, entry->end) -
2989 (entry->start + fudge);
2992 entry->start + fudge);
2995 /* we advance "entry" in the following if statement */
2998 uvm_map_lock_entry(entry, RW_WRITER);
3000 uvm_map_lock_entry(entry, RW_READER);
3002 pmap_remove(srcmap->pmap, entry->start,
3003 entry->end);
3004 uvm_map_unlock_entry(entry);
3005 oldentry = entry; /* save entry */
3006 entry = entry->next; /* advance */
3012 entry = entry->next; /* advance */
3088 * call [with uobj==NULL] to create a blank map entry in the main map.
3102 struct vm_map_entry *entry;
3108 if (uvm_map_lookup_entry(map, start, &entry)) {
3109 UVM_MAP_CLIP_START(map, entry, start);
3110 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
3112 entry = NULL;
3115 if (entry != NULL &&
3116 entry->start == start && entry->end == end &&
3117 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3118 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3119 entry->etype |= UVM_ET_SUBMAP;
3120 entry->object.sub_map = submap;
3121 entry->offset = 0;
3157 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
3164 struct vm_map_entry *current, *entry;
3172 if (uvm_map_lookup_entry(map, start, &entry)) {
3173 UVM_MAP_CLIP_START(map, entry, start);
3175 entry = entry->next;
3182 current = entry;
3214 current = entry;
3243 * If this entry points at a vnode, and the
3260 * wire this entry now if the old protection was VM_PROT_NONE
3284 * If locking the entry fails, remember the
3325 struct vm_map_entry *entry, *temp_entry;
3344 entry = temp_entry;
3345 UVM_MAP_CLIP_START(map, entry, start);
3347 entry = temp_entry->next;
3349 while ((entry != &map->header) && (entry->start < end)) {
3350 UVM_MAP_CLIP_END(map, entry, end);
3351 entry->inheritance = new_inheritance;
3352 entry = entry->next;
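
uvm_map_inherit is the simplest instance of the clip-and-iterate pattern that uvm_map_advice, uvm_map_protect and others share: clip the boundary entries to [start, end), then walk the list applying the new attribute until the end of the range or the header sentinel is reached. A sketch of the walk with illustrative types, assuming the boundary entries have already been clipped:

#include <stdint.h>

struct demo_ent {
        struct demo_ent *next;
        uintptr_t start, end;
        int inheritance;
};

/* Apply "value" to every already-clipped entry overlapping [.., end). */
static void
demo_set_inherit(struct demo_ent *header, struct demo_ent *first,
    uintptr_t end, int value)
{
        struct demo_ent *e;

        for (e = first; e != header && e->start < end; e = e->next)
                e->inheritance = value;
}
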
3368 struct vm_map_entry *entry, *temp_entry;
3376 entry = temp_entry;
3377 UVM_MAP_CLIP_START(map, entry, start);
3379 entry = temp_entry->next;
3386 while ((entry != &map->header) && (entry->start < end)) {
3387 UVM_MAP_CLIP_END(map, entry, end);
3401 entry->advice = new_advice;
3402 entry = entry->next;
3417 struct vm_map_entry *entry;
3424 if (!uvm_map_lookup_entry(map, start, &entry)) {
3425 entry = entry->next;
3427 while (entry->start < end) {
3428 struct vm_amap * const amap = entry->aref.ar_amap;
3429 struct uvm_object * const uobj = entry->object.uvm_obj;
3431 KASSERT(entry != &map->header);
3432 KASSERT(start < entry->end);
3441 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3445 offset = entry->offset;
3446 if (start < entry->start) {
3447 offset += entry->start - start;
3449 size = entry->offset + (entry->end - entry->start);
3450 if (entry->end < end) {
3451 size -= end - entry->end;
3455 entry = entry->next;
3480 struct vm_map_entry *entry, *start_entry, *failed_entry;
3509 entry = start_entry;
3524 UVM_MAP_CLIP_START(map, entry, start);
3531 while ((entry != &map->header) && (entry->start < end)) {
3532 if (entry->wired_count == 0 ||
3533 (entry->end < end &&
3534 (entry->next == &map->header ||
3535 entry->next->start > entry->end))) {
3541 entry = entry->next;
3550 entry = start_entry;
3551 while ((entry != &map->header) && (entry->start < end)) {
3552 UVM_MAP_CLIP_END(map, entry, end);
3553 if (VM_MAPENT_ISWIRED(entry))
3554 uvm_map_entry_unwire(map, entry);
3555 entry = entry->next;
3567 * to be created. then we clip each map entry to the region to
3584 while ((entry != &map->header) && (entry->start < end)) {
3585 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3595 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3596 if (UVM_ET_ISNEEDSCOPY(entry) &&
3597 ((entry->max_protection & VM_PROT_WRITE) ||
3598 (entry->object.uvm_obj == NULL))) {
3599 amap_copy(map, entry, 0, start, end);
3604 UVM_MAP_CLIP_START(map, entry, start);
3605 UVM_MAP_CLIP_END(map, entry, end);
3606 entry->wired_count++;
3612 if (entry->protection == VM_PROT_NONE ||
3613 (entry->end < end &&
3614 (entry->next == &map->header ||
3615 entry->next->start > entry->end))) {
3622 while (entry != &map->header && entry->end > start) {
3623 entry->wired_count--;
3624 entry = entry->prev;
3631 entry = entry->next;
3645 entry = start_entry;
3646 while (entry != &map->header && entry->start < end) {
3647 if (entry->wired_count == 1) {
3648 rv = uvm_fault_wire(map, entry->start, entry->end,
3649 entry->max_protection, 1);
3661 entry = entry->next;
3683 failed_entry = entry;
3684 while (entry != &map->header && entry->start < end) {
3685 entry->wired_count--;
3686 entry = entry->next;
3694 entry = start_entry;
3695 while (entry != failed_entry) {
3696 entry->wired_count--;
3697 if (VM_MAPENT_ISWIRED(entry) == 0)
3698 uvm_map_entry_unwire(map, entry);
3699 entry = entry->next;
3735 struct vm_map_entry *entry, *failed_entry;
3760 for (entry = map->header.next; entry != &map->header;
3761 entry = entry->next) {
3762 if (VM_MAPENT_ISWIRED(entry))
3763 uvm_map_entry_unwire(map, entry);
3814 for (size = 0, entry = map->header.next; entry != &map->header;
3815 entry = entry->next) {
3816 if (entry->protection != VM_PROT_NONE &&
3817 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3818 size += entry->end - entry->start;
3837 for (entry = map->header.next; entry != &map->header;
3838 entry = entry->next) {
3839 if (entry->protection == VM_PROT_NONE)
3841 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3851 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
3852 if (UVM_ET_ISNEEDSCOPY(entry) &&
3853 ((entry->max_protection & VM_PROT_WRITE) ||
3854 (entry->object.uvm_obj == NULL))) {
3855 amap_copy(map, entry, 0, entry->start,
3856 entry->end);
3861 entry->wired_count++;
3875 for (entry = map->header.next; entry != &map->header;
3876 entry = entry->next) {
3877 if (entry->wired_count == 1) {
3878 rv = uvm_fault_wire(map, entry->start, entry->end,
3879 entry->max_protection, 1);
3914 failed_entry = entry;
3915 for (/* nothing */; entry != &map->header;
3916 entry = entry->next) {
3917 if (entry->protection == VM_PROT_NONE)
3919 entry->wired_count--;
3929 for (entry = map->header.next; entry != failed_entry;
3930 entry = entry->next) {
3931 if (entry->protection == VM_PROT_NONE)
3933 entry->wired_count--;
3934 if (VM_MAPENT_ISWIRED(entry))
3935 uvm_map_entry_unwire(map, entry);
3966 struct vm_map_entry *current, *entry;
3984 if (!uvm_map_lookup_entry(map, start, &entry)) {
3993 for (current = entry; current->start < end; current = current->next) {
3998 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
4014 for (current = entry; start < end; current = current->next) {
4134 struct vm_map_entry *entry;
4140 entry = tmp_entry;
4142 if (entry == &map->header) {
4150 if (start < entry->start) {
4155 * check protection associated with entry
4158 if ((entry->protection & protection) != protection) {
4161 start = entry->end;
4162 entry = entry->next;
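
uvm_map_checkprot walks the range entry by entry: it fails on a hole (start < entry->start) or on an entry missing any requested protection bit, and otherwise advances start to the entry's end. A user-space sketch of the same walk, with the map header represented by a sentinel pointer:

#include <stdbool.h>
#include <stdint.h>

struct demo_pent {
        struct demo_pent *next;
        uintptr_t start, end;
        int protection;
};

static bool
demo_checkprot(const struct demo_pent *header, const struct demo_pent *entry,
    uintptr_t start, uintptr_t end, int protection)
{
        while (start < end) {
                if (entry == header || start < entry->start)
                        return false;   /* hole in the range */
                if ((entry->protection & protection) != protection)
                        return false;   /* some requested bit missing */
                start = entry->end;
                entry = entry->next;
        }
        return true;
}
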
4345 KASSERT(map->header.prev == &map->header);
4467 /* insert entry at end of new_map's entry list */
4468 uvm_map_entry_link(new_map, new_map->header.prev,
4483 * if the old_entry needs a new amap (due to prev fork)
4520 * the new entry will need an amap. it will either
4521 * need to be copied from the old entry or created
4522 * from scratch (if the old entry does not have an
4529 * 1. the old entry has an amap and that amap is
4542 * 2. if the old entry has an amap and a non-zero
4562 * if the parent's entry is wired down, then the
4566 * protect the old entry. in this case we
4610 * zero the mapping: the new entry will be zero initialized
4670 * go entry-by-entry
4676 * first, some sanity checks on the old entry
4725 * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4732 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4736 struct vm_map_entry *prev;
4742 if (entry->aref.ar_amap != NULL) {
4745 if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4749 uobj = entry->object.uvm_obj;
4750 size = entry->end - entry->start;
4752 newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4754 next = entry->next;
4756 next->start == entry->end &&
4761 uobj, entry->flags, entry->protection,
4762 entry->max_protection, entry->inheritance, entry->advice,
4763 entry->wired_count) &&
4764 (uobj == NULL || entry->offset + size == next->offset)) {
4780 entry->end = next->end;
4784 entry->aref = next->aref;
4785 entry->etype &= ~UVM_ET_NEEDSCOPY;
4793 prev = entry->prev;
4794 if (prev != &map->header &&
4795 prev->end == entry->start &&
4796 ((copying && !merged && prev->aref.ar_amap != NULL &&
4797 amap_refs(prev->aref.ar_amap) == 1) ||
4798 (!copying && prev->aref.ar_amap == NULL)) &&
4799 UVM_ET_ISCOMPATIBLE(prev, newetype,
4800 uobj, entry->flags, entry->protection,
4801 entry->max_protection, entry->inheritance, entry->advice,
4802 entry->wired_count) &&
4804 prev->offset + prev->end - prev->start == entry->offset)) {
4808 error = amap_extend(prev, size,
4818 entry->offset = prev->offset;
4821 entry->start = prev->start;
4822 clear_hints(map, prev);
4823 uvm_map_entry_unlink(map, prev);
4825 entry->aref = prev->aref;
4826 entry->etype &= ~UVM_ET_NEEDSCOPY;
4829 uvm_mapent_free(prev);
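
Both merge directions in uvm_mapent_trymerge hinge on the same requirements visible above: the neighbours must be virtually adjacent, UVM_ET_ISCOMPATIBLE must accept them, and object-backed entries must have contiguous object offsets. A sketch of the forward (entry/next) adjacency and offset test, with illustrative types in place of the real compatibility macro:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_ment {
        uintptr_t start, end;   /* [start, end) of the mapping */
        void *object;           /* backing object, or NULL if anonymous */
        uint64_t offset;        /* offset of start within the object */
};

static bool
demo_can_merge_forward(const struct demo_ment *e, const struct demo_ment *next)
{
        uint64_t size = e->end - e->start;

        if (next->start != e->end)
                return false;   /* not adjacent in VA */
        if (e->object != next->object)
                return false;   /* different backing objects */
        if (e->object != NULL && e->offset + size != next->offset)
                return false;   /* object offsets not contiguous */
        return true;            /* (real code also checks etype, prot, ...) */
}
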
4848 map->header.next = map->header.prev = &map->header;
4921 uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
4924 if (entry->aref.ar_amap != NULL) {
4925 amap_lock(entry->aref.ar_amap, op);
4927 if (UVM_ET_ISOBJ(entry)) {
4928 rw_enter(entry->object.uvm_obj->vmobjlock, op);
4933 uvm_map_unlock_entry(struct vm_map_entry *entry)
4936 if (UVM_ET_ISOBJ(entry)) {
4937 rw_exit(entry->object.uvm_obj->vmobjlock);
4939 if (entry->aref.ar_amap != NULL) {
4940 amap_unlock(entry->aref.ar_amap);
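
uvm_map_lock_entry takes the amap lock before the backing object's lock, and uvm_map_unlock_entry releases them in the opposite order. A sketch of that ordering discipline with pthread mutexes standing in for the kernel's amap_lock/rw_enter:

#include <pthread.h>
#include <stddef.h>

struct demo_locks {
        pthread_mutex_t *amap_lock;     /* NULL if the entry has no amap */
        pthread_mutex_t *object_lock;   /* NULL if no backing object */
};

static void
demo_lock_entry(struct demo_locks *l)
{
        if (l->amap_lock != NULL)
                pthread_mutex_lock(l->amap_lock);       /* amap first */
        if (l->object_lock != NULL)
                pthread_mutex_lock(l->object_lock);     /* then the object */
}

static void
demo_unlock_entry(struct demo_locks *l)
{
        if (l->object_lock != NULL)
                pthread_mutex_unlock(l->object_lock);   /* object first */
        if (l->amap_lock != NULL)
                pthread_mutex_unlock(l->amap_lock);     /* then the amap */
}
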
4981 struct vm_map_entry *entry;
5002 if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
5004 UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
5008 if (__predict_false(entry->protection == VM_PROT_NONE)) {
5029 * in the page (assuming the entry is not already wired). this
5032 if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
5042 if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
5047 if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
5048 ((entry->max_protection & VM_PROT_WRITE) ||
5049 (entry->object.uvm_obj == NULL)))) {
5058 amap_copy(map, entry, 0, start, end);
5066 if (__predict_true(entry->aref.ar_amap != NULL &&
5068 amap_lock(entry->aref.ar_amap, RW_WRITER);
5069 anon = amap_lookup(&entry->aref, start - entry->start);
5074 amap_unlock(entry->aref.ar_amap);
5093 entry->max_protection, 1)) {
5116 if (entry->aref.ar_amap) {
5117 amap_lock(entry->aref.ar_amap, RW_WRITER);
5118 anon = amap_lookup(&entry->aref, start - entry->start);
5120 found_anon: KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
5128 amap_unlock(entry->aref.ar_amap);
5132 if (!result && UVM_ET_ISOBJ(entry)) {
5133 struct uvm_object *uobj = entry->object.uvm_obj;
5138 voaddr->offset = entry->offset + (va - entry->start);
5243 struct vm_map_entry *entry;
5254 for (entry = map->header.next; entry != &map->header;
5255 entry = entry->next) {
5257 entry, entry->start, entry->end, entry->object.uvm_obj,
5258 (long long)entry->offset, entry->aref.ar_amap,
5259 entry->aref.ar_pageoff);
5263 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5264 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5265 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5266 entry->protection, entry->max_protection,
5267 entry->inheritance, entry->wired_count, entry->advice,
5268 entry == map->first_free ? " (first_free)" : "");
5278 struct vm_map_entry *entry;
5280 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5284 (void *)addr, (void *)entry->start,
5285 (size_t)(addr - (uintptr_t)entry->start), map);
5286 if (!UVM_ET_ISSUBMAP(entry)) {
5289 map = entry->object.sub_map;
5401 struct vm_map_entry *entry;
5433 for (entry = map->header.next; entry != &map->header;
5434 entry = entry->next) {
5436 error = fill_vmentry(l, p, &vme[count], map, entry);