Lines Matching defs:entry
307 * Entries with start == end may only exist if they are the first entry
342 struct vm_map_entry *entry)
354 min = VMMAP_FREE_START(entry);
355 max = VMMAP_FREE_END(entry);
363 KDASSERT((entry->fspace & (vaddr_t)PAGE_MASK) == 0);
364 KASSERT((entry->etype & UVM_ET_FREEMAPPED) == 0);
373 (*fun->uaddr_free_insert)(map, uaddr, entry);
374 entry->etype |= UVM_ET_FREEMAPPED;
378 uvm_map_addr_augment(entry);
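
uvm_mapent_free_insert and uvm_mapent_free_remove above guard the free-list handoff with the UVM_ET_FREEMAPPED flag and derive the free range from end, guard and fspace. A toy userland sketch of that discipline, assuming the usual VMMAP_FREE_START/VMMAP_FREE_END definitions (end + guard and end + guard + fspace); the uaddr free-list callbacks are stubbed out, and names like ET_FREEMAPPED and struct ent are illustrative, not the kernel's:

#include <assert.h>
#include <stdio.h>

#define ET_FREEMAPPED   0x1000      /* stand-in for UVM_ET_FREEMAPPED */

struct ent {
    unsigned long end, guard, fspace;
    int etype;
};

/* Free-range bookkeeping, mirroring VMMAP_FREE_START/VMMAP_FREE_END. */
#define FREE_START(e)   ((e)->end + (e)->guard)
#define FREE_END(e)     ((e)->end + (e)->guard + (e)->fspace)

static void
free_insert(struct ent *e)
{
    assert((e->etype & ET_FREEMAPPED) == 0);    /* never inserted twice */
    /* ... hand [FREE_START(e), FREE_END(e)) to the address allocator ... */
    e->etype |= ET_FREEMAPPED;
}

static void
free_remove(struct ent *e)
{
    assert((e->etype & ET_FREEMAPPED) != 0);    /* must be on a free list */
    /* ... take the range back from the address allocator ... */
    e->etype &= ~ET_FREEMAPPED;
}

int
main(void)
{
    struct ent e = { .end = 0x2000, .guard = 0x1000, .fspace = 0x4000 };

    free_insert(&e);
    printf("free range [0x%lx, 0x%lx)\n", FREE_START(&e), FREE_END(&e));
    free_remove(&e);
    return 0;
}

The assertions play the role of the KASSERTs: inserting an entry that is already free-mapped, or removing one that is not, is a bug.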
386 struct vm_map_entry *entry)
390 KASSERT((entry->etype & UVM_ET_FREEMAPPED) != 0 || uaddr == NULL);
391 KASSERT(uvm_map_uaddr_e(map, entry) == uaddr);
397 (*fun->uaddr_free_remove)(map, uaddr, entry);
398 entry->etype &= ~UVM_ET_FREEMAPPED;
406 uvm_mapent_addr_insert(struct vm_map *map, struct vm_map_entry *entry)
410 if (!RBT_CHECK(uvm_map_addr, entry, UVMMAP_DEADBEEF))
411 panic("uvm_mapent_addr_insert: entry still in addr list");
412 KDASSERT(entry->start <= entry->end);
413 KDASSERT((entry->start & (vaddr_t)PAGE_MASK) == 0 &&
414 (entry->end & (vaddr_t)PAGE_MASK) == 0);
417 entry->start, entry->end, entry->protection, NULL);
420 res = RBT_INSERT(uvm_map_addr, &map->addr, entry);
422 panic("uvm_mapent_addr_insert: map %p entry %p "
424 "with entry %p (0x%lx-0x%lx G=0x%lx F=0x%lx)",
425 map, entry,
426 entry->start, entry->end, entry->guard, entry->fspace,
435 uvm_mapent_addr_remove(struct vm_map *map, struct vm_map_entry *entry)
440 entry->start, entry->end, entry->protection, NULL);
443 res = RBT_REMOVE(uvm_map_addr, &map->addr, entry);
444 if (res != entry)
446 RBT_POISON(uvm_map_addr, entry, UVMMAP_DEADBEEF);
461 uvm_map_lock_entry(struct vm_map_entry *entry)
463 if (entry->aref.ar_amap != NULL) {
464 amap_lock(entry->aref.ar_amap, RW_WRITE);
466 if (UVM_ET_ISOBJ(entry)) {
467 rw_enter(entry->object.uvm_obj->vmobjlock, RW_WRITE);
472 uvm_map_unlock_entry(struct vm_map_entry *entry)
474 if (UVM_ET_ISOBJ(entry)) {
475 rw_exit(entry->object.uvm_obj->vmobjlock);
477 if (entry->aref.ar_amap != NULL) {
478 amap_unlock(entry->aref.ar_amap);
519 * Find the entry describing the given address.
539 * DEAD_ENTRY_PUSH(struct uvm_map_deadq *deadq, struct vm_map_entry *entry)
542 * Since the linked list abuses the address tree for storage, the entry
549 dead_entry_push(struct uvm_map_deadq *deadq, struct vm_map_entry *entry)
551 TAILQ_INSERT_TAIL(deadq, entry, dfree.deadq);
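
dead_entry_push (and the DEAD_ENTRY_PUSH wrapper) just appends a detached entry to a TAILQ so everything can be released in one pass once the map lock is dropped, as uvm_unmap_detach does further down. A minimal sketch of that pattern with <sys/queue.h>; struct dead_entry, deadq_push and deadq_flush are hypothetical stand-ins, not kernel names:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical stand-in for vm_map_entry; only the deadq linkage matters. */
struct dead_entry {
    unsigned long start, end;
    TAILQ_ENTRY(dead_entry) deadq;      /* cf. dfree.deadq */
};
TAILQ_HEAD(dead_head, dead_entry);

/* Equivalent of DEAD_ENTRY_PUSH(): append to the tail of the queue. */
static void
deadq_push(struct dead_head *dh, struct dead_entry *e)
{
    TAILQ_INSERT_TAIL(dh, e, deadq);
}

/* Batch release, loosely like uvm_unmap_detach() for plain entries. */
static void
deadq_flush(struct dead_head *dh)
{
    struct dead_entry *e;

    while ((e = TAILQ_FIRST(dh)) != NULL) {
        TAILQ_REMOVE(dh, e, deadq);
        free(e);
    }
}

int
main(void)
{
    struct dead_head dh = TAILQ_HEAD_INITIALIZER(dh);
    struct dead_entry *e = malloc(sizeof(*e));

    e->start = 0x1000;
    e->end = 0x2000;
    deadq_push(&dh, e);
    deadq_flush(&dh);
    printf("queue empty: %d\n", TAILQ_EMPTY(&dh));
    return 0;
}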
559 * Fills in *start_ptr and *end_ptr to be the first and last entry describing
619 * Also, if the entry belongs to uaddr_exe or uaddr_brk_stack, it is
681 /* Calculate entry augmentation value. */
683 uvm_map_addr_augment_get(struct vm_map_entry *entry)
688 augment = entry->fspace;
689 if ((left = RBT_LEFT(uvm_map_addr, entry)) != NULL)
691 if ((right = RBT_RIGHT(uvm_map_addr, entry)) != NULL)
697 * Update augmentation data in entry.
700 uvm_map_addr_augment(struct vm_map_entry *entry)
704 while (entry != NULL) {
706 augment = uvm_map_addr_augment_get(entry);
710 * Once we find an entry that already has the correct value,
714 if (entry->fspace_augment == augment)
716 entry->fspace_augment = augment;
717 entry = RBT_PARENT(uvm_map_addr, entry);
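
uvm_map_addr_augment_get takes the maximum of the entry's own fspace and the cached augment of both children, and uvm_map_addr_augment pushes that value toward the root, stopping at the first node whose cached value is already correct. The same walk on a hand-rolled binary tree (struct node and the helper names are assumptions, not the RBT implementation):

#include <stdio.h>

#define MAX(a, b)   ((a) > (b) ? (a) : (b))

/* Hypothetical tree node; fspace_augment caches the subtree maximum. */
struct node {
    struct node *parent, *left, *right;
    unsigned long fspace;           /* free space after this entry */
    unsigned long fspace_augment;   /* max fspace in this subtree */
};

static unsigned long
augment_get(const struct node *n)
{
    unsigned long a = n->fspace;

    if (n->left != NULL)
        a = MAX(a, n->left->fspace_augment);
    if (n->right != NULL)
        a = MAX(a, n->right->fspace_augment);
    return a;
}

/* Walk toward the root; stop at the first node that is already correct. */
static void
augment_fixup(struct node *n)
{
    unsigned long a;

    while (n != NULL) {
        a = augment_get(n);
        if (n->fspace_augment == a)
            break;
        n->fspace_augment = a;
        n = n->parent;
    }
}

int
main(void)
{
    struct node root = { 0 }, child = { 0 };

    root.left = &child;
    child.parent = &root;
    child.fspace = 4096;
    augment_fixup(&child);
    printf("root augment: %lu\n", root.fspace_augment);     /* 4096 */
    return 0;
}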
735 struct vm_map_entry *first, *last, *entry, *new;
778 * Before grabbing the lock, allocate a map entry for later
856 * Create new entry.
859 entry = uvm_map_mkentry(map, first, last, *addr, sz, flags, &dead,
861 if (entry == NULL) {
866 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
867 entry->object.uvm_obj = NULL;
868 entry->offset = 0;
869 entry->protection = prot;
870 entry->max_protection = maxprot;
871 entry->inheritance = inherit;
872 entry->wired_count = 0;
873 entry->advice = advice;
875 entry->etype |= UVM_ET_STACK;
880 entry->etype |= UVM_ET_COPYONWRITE;
882 entry->etype |= UVM_ET_NEEDSCOPY;
885 entry->etype |= UVM_ET_CONCEAL;
887 entry->aref.ar_pageoff = 0;
888 entry->aref.ar_amap = amap_alloc(sz, M_WAITOK, 0);
938 struct vm_map_entry *first, *last, *entry, *new;
1007 * Before grabbing the lock, allocate a map entry for later
1125 * Create new entry.
1128 entry = uvm_map_mkentry(map, first, last, *addr, sz, flags, &dead,
1130 if (entry == NULL) {
1135 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
1136 entry->object.uvm_obj = uobj;
1137 entry->offset = uoffset;
1138 entry->protection = prot;
1139 entry->max_protection = maxprot;
1140 entry->inheritance = inherit;
1141 entry->wired_count = 0;
1142 entry->advice = advice;
1144 entry->etype |= UVM_ET_STACK;
1149 entry->etype |= UVM_ET_OBJ;
1151 entry->etype |= UVM_ET_HOLE;
1153 entry->etype |= UVM_ET_NOFAULT;
1155 entry->etype |= UVM_ET_WC;
1157 entry->etype |= UVM_ET_COPYONWRITE;
1159 entry->etype |= UVM_ET_NEEDSCOPY;
1162 entry->etype |= UVM_ET_CONCEAL;
1164 entry->aref.ar_pageoff = 0;
1165 entry->aref.ar_amap = amap_alloc(sz, M_WAITOK, 0);
1179 * Try to merge entry.
1183 * and only try the merge if it concerns a kernel entry.
1187 uvm_mapent_tryjoin(map, entry, &dead);
1218 /* Must be the same entry type and not have free memory between. */
1263 * Returns the merged entry on success.
1302 * Attempt forward and backward joining of entry.
1304 * Returns entry after joins.
1305 * We are guaranteed that the amap of entry is either non-existent or
1309 uvm_mapent_tryjoin(struct vm_map *map, struct vm_map_entry *entry,
1315 /* Merge with previous entry. */
1316 other = RBT_PREV(uvm_map_addr, entry);
1317 if (other && uvm_mapent_isjoinable(map, other, entry)) {
1318 merged = uvm_mapent_merge(map, other, entry, dead);
1320 entry = merged;
1324 * Merge with next entry.
1326 * Because amap can only extend forward and the next entry
1330 other = RBT_NEXT(uvm_map_addr, entry);
1331 if (other && entry->aref.ar_amap == NULL &&
1333 uvm_mapent_isjoinable(map, entry, other)) {
1334 merged = uvm_mapent_merge(map, entry, other, dead);
1336 entry = merged;
1339 return entry;
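
uvm_mapent_tryjoin first tries to fold the entry into its predecessor and then into its successor, but the forward merge is attempted only when the entry has no amap, since an amap can only be extended forward. A heavily reduced sketch on a doubly linked list of segments; seg_isjoinable stands in for uvm_mapent_isjoinable and checks only adjacency and protection, and all amap/object handling beyond that one forward-merge guard is elided:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical segment: [start, end) plus the attributes that must match. */
struct seg {
    struct seg *prev, *next;
    unsigned long start, end;
    int prot;
    void *amap;     /* NULL if the segment has no amap */
};

/* Reduced stand-in for uvm_mapent_isjoinable: adjacency + same protection. */
static bool
seg_isjoinable(const struct seg *a, const struct seg *b)
{
    return a->end == b->start && a->prot == b->prot;
}

/* Absorb b into a, unlink and free it; returns a. */
static struct seg *
seg_merge(struct seg *a, struct seg *b)
{
    a->end = b->end;
    a->next = b->next;
    if (b->next != NULL)
        b->next->prev = a;
    free(b);
    return a;
}

static struct seg *
seg_tryjoin(struct seg *e)
{
    struct seg *other;

    /* Merge with the previous segment first. */
    other = e->prev;
    if (other != NULL && seg_isjoinable(other, e))
        e = seg_merge(other, e);

    /*
     * Then the next one, but only when e carries no amap: an amap can
     * only be extended forward, so merging here would otherwise mean
     * copying or moving it.
     */
    other = e->next;
    if (other != NULL && e->amap == NULL && other->amap == NULL &&
        seg_isjoinable(e, other))
        e = seg_merge(e, other);

    return e;
}

int
main(void)
{
    struct seg *a = calloc(1, sizeof(*a));
    struct seg *b = calloc(1, sizeof(*b));

    a->start = 0x1000; a->end = 0x2000;
    b->start = 0x2000; b->end = 0x3000;
    a->next = b; b->prev = a;
    a = seg_tryjoin(b);
    printf("[0x%lx, 0x%lx)\n", a->start, a->end);   /* [0x1000, 0x3000) */
    free(a);
    return 0;
}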
1348 struct vm_map_entry *entry, *tmp;
1350 TAILQ_FOREACH_SAFE(entry, deadq, dfree.deadq, tmp) {
1352 if (entry->aref.ar_amap)
1353 amap_unref(entry->aref.ar_amap,
1354 entry->aref.ar_pageoff,
1355 atop(entry->end - entry->start),
1359 if (UVM_ET_ISSUBMAP(entry)) {
1361 uvm_map_deallocate(entry->object.sub_map);
1362 } else if (UVM_ET_ISOBJ(entry) &&
1363 entry->object.uvm_obj->pgops->pgo_detach) {
1364 entry->object.uvm_obj->pgops->pgo_detach(
1365 entry->object.uvm_obj);
1368 TAILQ_REMOVE(deadq, entry, dfree.deadq);
1369 uvm_mapent_free(entry);
1376 struct vm_map_entry *entry;
1378 while ((entry = TAILQ_FIRST(deadq)) != NULL) {
1379 KASSERT(entry->aref.ar_amap == NULL);
1380 KASSERT(!UVM_ET_ISSUBMAP(entry));
1381 KASSERT(!UVM_ET_ISOBJ(entry));
1382 TAILQ_REMOVE(deadq, entry, dfree.deadq);
1383 uvm_mapent_free(entry);
1388 * Create and insert new entry.
1390 * Returned entry contains new addresses and is inserted properly in the tree.
1398 struct vm_map_entry *entry, *prev;
1400 vaddr_t min, max; /* free space boundaries for new entry */
1416 /* Initialize new entry. */
1418 entry = uvm_mapent_alloc(map, flags);
1420 entry = new;
1421 if (entry == NULL)
1423 entry->offset = 0;
1424 entry->etype = 0;
1425 entry->wired_count = 0;
1426 entry->aref.ar_pageoff = 0;
1427 entry->aref.ar_amap = NULL;
1429 entry->start = addr;
1430 entry->end = min;
1431 entry->guard = 0;
1432 entry->fspace = 0;
1464 /* Finally, link in entry. */
1465 uvm_mapent_addr_insert(map, entry);
1466 uvm_map_fix_space(map, entry, min, max, flags);
1469 return entry;
1474 * uvm_mapent_alloc: allocate a map entry
1494 "entry");
1529 * uvm_mapent_free: free map entry
1551 * uvm_map_lookup_entry: find map entry at or before an address.
1554 * => entry is returned in "entry"
1555 * => return value is true if address is in the returned entry
1561 struct vm_map_entry **entry)
1565 *entry = uvm_map_entrybyaddr(&map->addr, address);
1566 return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
1567 (*entry)->start <= address && (*entry)->end > address;
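
The contract of uvm_map_lookup_entry is: return the entry at or before the address, and report true only when the address actually lies inside it and the entry is not a hole. The same contract expressed as a binary search over a sorted array (hypothetical struct ent, no tree involved):

#include <stdbool.h>
#include <stdio.h>

struct ent {
    unsigned long start, end;   /* [start, end) */
    bool hole;
};

/*
 * Find the entry with the largest start <= addr; report true only if
 * addr actually lies inside that entry and the entry is not a hole.
 */
static bool
lookup_entry(struct ent *tab, int n, unsigned long addr, struct ent **out)
{
    int lo = 0, hi = n - 1, mid;
    struct ent *best = NULL;

    while (lo <= hi) {
        mid = lo + (hi - lo) / 2;
        if (tab[mid].start <= addr) {
            best = &tab[mid];
            lo = mid + 1;
        } else
            hi = mid - 1;
    }
    *out = best;
    return best != NULL && !best->hole &&
        best->start <= addr && best->end > addr;
}

int
main(void)
{
    struct ent tab[] = {
        { 0x1000, 0x3000, false },
        { 0x3000, 0x4000, true },   /* a hole */
        { 0x8000, 0x9000, false },
    };
    struct ent *e;

    printf("%d\n", lookup_entry(tab, 3, 0x2000, &e));   /* 1: inside */
    printf("%d\n", lookup_entry(tab, 3, 0x3800, &e));   /* 0: hole */
    printf("%d\n", lookup_entry(tab, 3, 0x5000, &e));   /* 0: gap, *e still set */
    return 0;
}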
1571 * Stack must be in a MAP_STACK entry. PROT_NONE indicates stack not yet
1572 * grown -- then uvm_map_check_region_range() should not cache the entry
1576 uvm_map_inentry_sp(vm_map_entry_t entry)
1578 if ((entry->etype & UVM_ET_STACK) == 0) {
1579 if (entry->protection == PROT_NONE)
1603 vm_map_entry_t entry;
1613 if (!uvm_map_lookup_entry(map, trunc_page(addr), &entry)) {
1618 ret = (*fn)(entry);
1623 ie->ie_start = entry->start;
1624 ie->ie_end = entry->end;
1802 * Mark entry as free.
1804 * entry will be put on the dead list.
1805 * The free space will be merged into the previous or a new entry,
1809 uvm_mapent_mkfree(struct vm_map *map, struct vm_map_entry *entry,
1821 if (prev == entry)
1825 VMMAP_FREE_END(prev) != entry->start)
1826 prev = RBT_PREV(uvm_map_addr, entry);
1829 if (prev == NULL && entry->start == entry->end && markfree) {
1830 *prev_ptr = entry;
1834 addr = entry->start;
1835 end = VMMAP_FREE_END(entry);
1836 free = uvm_map_uaddr_e(map, entry);
1837 uvm_mapent_free_remove(map, free, entry);
1838 uvm_mapent_addr_remove(map, entry);
1839 DEAD_ENTRY_PUSH(dead, entry);
1851 * Unwire and release referenced amap and object from map entry.
1854 uvm_unmap_kill_entry_withlock(struct vm_map *map, struct vm_map_entry *entry,
1857 /* Unwire removed map entry. */
1858 if (VM_MAPENT_ISWIRED(entry)) {
1859 entry->wired_count = 0;
1860 uvm_fault_unwire_locked(map, entry->start, entry->end);
1864 uvm_map_lock_entry(entry);
1867 if (UVM_ET_ISHOLE(entry)) {
1872 uvm_km_pgremove_intrsafe(entry->start, entry->end);
1873 } else if (UVM_ET_ISOBJ(entry) &&
1874 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
1910 uvm_km_pgremove(entry->object.uvm_obj, entry->start,
1911 entry->end);
1914 pmap_remove(map->pmap, entry->start, entry->end);
1918 uvm_map_unlock_entry(entry);
1922 uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
1924 uvm_unmap_kill_entry_withlock(map, entry, 0);
1931 * If markfree, entry will be properly marked free; otherwise, no replacement
1932 * entry will be put in the tree (corrupting the tree).
1939 struct vm_map_entry *prev_hint, *next, *entry;
1948 /* Find first affected entry. */
1949 entry = uvm_map_entrybyaddr(&map->addr, start);
1950 KDASSERT(entry != NULL && entry->start <= start);
1953 struct vm_map_entry *entry1 = entry;
1969 if (entry->end <= start && markfree)
1970 entry = RBT_NEXT(uvm_map_addr, entry);
1972 UVM_MAP_CLIP_START(map, entry, start);
1979 for (; entry != NULL && entry->start < end; entry = next) {
1980 KDASSERT(entry->start >= start);
1981 if (entry->end > end || !markfree)
1982 UVM_MAP_CLIP_END(map, entry, end);
1983 KDASSERT(entry->start >= start && entry->end <= end);
1984 next = RBT_NEXT(uvm_map_addr, entry);
1987 if (UVM_ET_ISHOLE(entry)) {
1989 prev_hint = entry;
1995 if (UVM_ET_ISSTACK(entry) && (map->flags & VM_MAP_ISVMSPACE))
1998 /* Kill entry. */
1999 uvm_unmap_kill_entry_withlock(map, entry, 1);
2003 entry->object.uvm_obj == NULL &&
2004 entry->protection != PROT_NONE &&
2005 !UVM_ET_ISHOLE(entry)) {
2007 uvmspace_dused(map, entry->start, entry->end);
2009 if (!UVM_ET_ISHOLE(entry))
2010 map->size -= entry->end - entry->start;
2012 /* Actual removal of entry. */
2013 uvm_mapent_mkfree(map, entry, &prev_hint, dead, markfree);
2020 for (entry = uvm_map_entrybyaddr(&map->addr, start);
2021 entry != NULL && entry->start < end;
2022 entry = RBT_NEXT(uvm_map_addr, entry)) {
2023 KDASSERT(entry->end <= start ||
2024 entry->start == entry->end ||
2025 UVM_ET_ISHOLE(entry));
2039 * Lock must be exclusive on entry and will not be touched.
2062 * Lock must be exclusive on entry.
2079 * to be created. then we clip each map entry to the region to
2217 * Find first entry.
2246 * Last ended at the first entry after the range.
2272 * Make tmp be the first entry after what is to be touched.
2297 * Make tmp be the first entry after what is to be touched.
2453 struct vm_map_entry *entry, *tmp;
2481 * - any entry not in dead_entries is white
2482 * - any entry that appears in dead_entries before entry,
2484 * The set [entry, end] is also referred to as the wavefront.
2491 if ((entry = RBT_ROOT(uvm_map_addr, &map->addr)) != NULL)
2492 DEAD_ENTRY_PUSH(&dead_entries, entry);
2493 while (entry != NULL) {
2495 uvm_unmap_kill_entry(map, entry);
2496 if ((tmp = RBT_LEFT(uvm_map_addr, entry)) != NULL)
2498 if ((tmp = RBT_RIGHT(uvm_map_addr, entry)) != NULL)
2501 entry = TAILQ_NEXT(entry, dfree.deadq);
2508 RBT_FOREACH(entry, uvm_map_addr, &map->addr)
2510 TAILQ_FOREACH(entry, &dead_entries, dfree.deadq)
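
uvm_map_teardown walks the whole address tree without recursion: the root is pushed onto dead_entries, and while the queue is traversed with TAILQ_NEXT each node's children are appended, so the queue is both the worklist (the grey wavefront) and the final list handed to uvm_unmap_detach. A userland sketch of that traversal; struct node and tree_to_deadq are illustrative names:

#include <stdio.h>
#include <sys/queue.h>

struct node {
    struct node *left, *right;
    int val;
    TAILQ_ENTRY(node) deadq;
};
TAILQ_HEAD(deadq_head, node);

/*
 * Push the root, then walk the queue itself: every node appended here is
 * later reached by TAILQ_NEXT, so each tree node is queued exactly once
 * and the unvisited tail of the queue is the "wavefront".
 */
static void
tree_to_deadq(struct node *root, struct deadq_head *dq)
{
    struct node *n;

    if (root == NULL)
        return;
    TAILQ_INSERT_TAIL(dq, root, deadq);
    for (n = root; n != NULL; n = TAILQ_NEXT(n, deadq)) {
        /* the kernel kills/unmaps n here before queueing its children */
        if (n->left != NULL)
            TAILQ_INSERT_TAIL(dq, n->left, deadq);
        if (n->right != NULL)
            TAILQ_INSERT_TAIL(dq, n->right, deadq);
    }
}

int
main(void)
{
    struct deadq_head dq = TAILQ_HEAD_INITIALIZER(dq);
    struct node a = { 0 }, b = { 0 }, c = { 0 }, *n;

    a.val = 1; b.val = 2; c.val = 3;
    a.left = &b;
    a.right = &c;
    tree_to_deadq(&a, &dq);
    TAILQ_FOREACH(n, &dq, deadq)
        printf("%d ", n->val);      /* 1 2 3 */
    printf("\n");
    return 0;
}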
2534 * Split entry at given address.
2536 * orig: entry that is to be split.
2537 * next: a newly allocated map entry that is not linked.
2668 /* Addresses for entry must lie within map boundaries. */
2831 struct vm_map_entry *entry;
2863 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
2865 entry, entry->start, entry->end, entry->object.uvm_obj,
2866 (long long)entry->offset, entry->aref.ar_amap,
2867 entry->aref.ar_pageoff);
2871 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
2872 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
2873 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
2874 (entry->etype & UVM_ET_STACK) ? 'T' : 'F',
2875 entry->protection, entry->max_protection,
2876 entry->inheritance, entry->wired_count, entry->advice);
2878 free = uvm_map_uaddr_e(map, entry);
2882 (entry->etype & UVM_ET_HOLE) ? 'T' : 'F',
2884 entry->guard,
2885 VMMAP_FREE_START(entry), VMMAP_FREE_END(entry));
2886 (*pr)("\tfspace_augment=%lu\n", entry->fspace_augment);
2888 (entry->etype & UVM_ET_FREEMAPPED) ? 'T' : 'F', free);
3061 * - first will contain first entry at or after start.
3191 * wire this entry now if the old protection was PROT_NONE
3201 * If locking the entry fails, remember the
3485 * srcaddr > map entry start? means we are in the middle of a
3493 panic("uvm_share: map entry start > srcaddr");
3527 * Clone map entry into other map.
3543 /* Create new entry (linked in on creation). Fill in first, last. */
3547 "entry in empty map");
3649 * the new entry will need an amap. it will either
3650 * need to be copied from the old entry or created
3651 * from scratch (if the old entry does not have an
3658 * 1. the old entry has an amap and that amap is
3671 * 2. if the old entry has an amap and a non-zero
3689 * if the parent's entry is wired down, then the
3693 * protect the old entry. in this case we
3759 * zero the mapping: the new entry will be zero initialized
3819 /* go entry-by-entry */
3825 /* first, some sanity checks on the old entry */
3833 panic("fork: non-copy_on_write map entry marked "
3879 * space in which an entry was inherited.
3942 * call [with uobj==NULL] to create a blank map entry in the main map.
3955 struct vm_map_entry *entry;
3964 if (uvm_map_lookup_entry(map, start, &entry)) {
3965 UVM_MAP_CLIP_START(map, entry, start);
3966 UVM_MAP_CLIP_END(map, entry, end);
3968 entry = NULL;
3970 if (entry != NULL &&
3971 entry->start == start && entry->end == end &&
3972 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3973 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3974 entry->etype |= UVM_ET_SUBMAP;
3975 entry->object.sub_map = submap;
3976 entry->offset = 0;
3996 struct vm_map_entry *entry;
4008 for (entry = uvm_map_entrybyaddr(&map->addr, start);
4009 entry != NULL && entry->start < end;
4010 entry = RBT_NEXT(uvm_map_addr, entry)) {
4012 if (UVM_ET_ISHOLE(entry) ||
4013 (entry->end < end && entry->end != VMMAP_FREE_END(entry)))
4017 if ((entry->protection & protection) != protection)
4082 struct vm_map_entry *entry, *entry1;
4104 entry = uvm_map_entrybyaddr(&map->addr, start);
4105 if (entry->end > start)
4106 UVM_MAP_CLIP_START(map, entry, start);
4108 entry = RBT_NEXT(uvm_map_addr, entry);
4111 entry1 = entry;
4121 while (entry != NULL && entry->start < end) {
4122 UVM_MAP_CLIP_END(map, entry, end);
4123 entry->inheritance = new_inheritance;
4124 entry = RBT_NEXT(uvm_map_addr, entry);
4177 struct vm_map_entry *entry, *entry1;
4189 entry = uvm_map_entrybyaddr(&map->addr, start);
4190 if (entry->end > start)
4191 UVM_MAP_CLIP_START(map, entry, start);
4193 entry = RBT_NEXT(uvm_map_addr, entry);
4196 entry1 = entry;
4203 while (entry != NULL && entry->start < end) {
4204 UVM_MAP_CLIP_END(map, entry, end);
4206 entry->etype |= UVM_ET_IMMUTABLE;
4208 entry->etype &= ~UVM_ET_IMMUTABLE;
4209 entry = RBT_NEXT(uvm_map_addr, entry);
4225 struct vm_map_entry *entry;
4245 entry = uvm_map_entrybyaddr(&map->addr, start);
4246 if (entry != NULL && entry->end > start)
4247 UVM_MAP_CLIP_START(map, entry, start);
4248 else if (entry != NULL)
4249 entry = RBT_NEXT(uvm_map_addr, entry);
4254 while (entry != NULL && entry->start < end) {
4255 UVM_MAP_CLIP_END(map, entry, end);
4256 entry->advice = new_advice;
4257 entry = RBT_NEXT(uvm_map_addr, entry);
4281 struct vm_map_entry *first, *entry, *newentry, *tmp1, *tmp2;
4309 /* Lock srcmap, lookup first and last entry in <start,len>. */
4313 for (entry = first; entry != NULL && entry->end < end;
4314 entry = RBT_NEXT(uvm_map_addr, entry)) {
4315 if (VMMAP_FREE_END(entry) != entry->end ||
4316 UVM_ET_ISHOLE(entry)) {
4321 if (entry == NULL || UVM_ET_ISHOLE(entry)) {
4329 for (entry = first; entry != NULL && entry->start < end;
4330 entry = RBT_NEXT(uvm_map_addr, entry)) {
4331 if (UVM_ET_ISNEEDSCOPY(entry))
4332 amap_copy(srcmap, entry, M_NOWAIT,
4333 UVM_ET_ISSTACK(entry) ? FALSE : TRUE, start, end);
4334 if (UVM_ET_ISNEEDSCOPY(entry)) {
4359 for (entry = first; entry != NULL && entry->start < end;
4360 entry = RBT_NEXT(uvm_map_addr, entry)) {
4361 KDASSERT(!UVM_ET_ISNEEDSCOPY(entry));
4362 if (UVM_ET_ISHOLE(entry))
4366 cp_start = entry->start;
4372 cp_len = MIN(entry->end, end) - cp_start;
4376 entry->protection, entry->max_protection,
4377 entry, &dead, flags, AMAP_SHARED | AMAP_REFALL);
4431 struct vm_map_entry *first, *entry;
4451 for (entry = first; entry != NULL && entry->start < end;
4452 entry = RBT_NEXT(uvm_map_addr, entry)) {
4453 if (entry->etype & UVM_ET_IMMUTABLE)
4455 if (UVM_ET_ISSUBMAP(entry)) {
4459 if (UVM_ET_ISSUBMAP(entry) ||
4460 UVM_ET_ISHOLE(entry) ||
4461 (entry->end < end &&
4462 VMMAP_FREE_END(entry) != entry->end)) {
4471 for (entry = first; entry != NULL && entry->start < end;
4472 entry = RBT_NEXT(uvm_map_addr, entry)) {
4473 amap = entry->aref.ar_amap; /* top layer */
4474 if (UVM_ET_ISOBJ(entry))
4475 uobj = entry->object.uvm_obj;
4492 cp_start = MAX(entry->start, start);
4493 cp_end = MIN(entry->end, end);
4497 anon = amap_lookup(&entry->aref,
4498 cp_start - entry->start);
4543 amap_unadd(&entry->aref,
4544 cp_start - entry->start);
4556 cp_start = MAX(entry->start, start);
4557 cp_end = MIN(entry->end, end);
4568 ((entry->max_protection & PROT_WRITE) != 0 &&
4569 (entry->etype & UVM_ET_COPYONWRITE) == 0))) {
4572 cp_start - entry->start + entry->offset,
4573 cp_end - entry->start + entry->offset, flags);
4589 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
4593 KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
4597 uvm_map_splitentry(map, entry, tmp, addr);
4603 * Clippers are required to not change the pointers to the entry they are
4605 * Since uvm_map_splitentry turns the original entry into the lowest
4606 * entry (address wise) we do a swap between the new entry and the original
4607 * entry, prior to calling uvm_map_splitentry.
4610 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
4616 free = uvm_map_uaddr_e(map, entry);
4617 uvm_mapent_free_remove(map, free, entry);
4618 uvm_mapent_addr_remove(map, entry);
4620 /* Copy entry. */
4621 KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
4623 uvm_mapent_copy(entry, tmp);
4625 /* Put new entry in place of original entry. */
4630 uvm_map_splitentry(map, tmp, entry, addr);
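
uvm_map_splitentry leaves the original entry as the low half ([start, addr)) and the freshly allocated entry as the high half ([addr, end)). uvm_map_clip_end wants exactly that, while uvm_map_clip_start needs the caller's pointer to describe the high half, hence the copy-and-swap above. A self-contained sketch of both clippers on a hypothetical struct ent; tree relinking, free-list fixup and amap handling are all omitted:

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct ent {
    unsigned long start, end;   /* [start, end) */
    unsigned long offset;       /* offset into the backing object */
};

/* Like uvm_map_splitentry: lo keeps [lo->start, addr), hi gets the rest. */
static void
split_entry(struct ent *lo, struct ent *hi, unsigned long addr)
{
    assert(lo->start < addr && addr < lo->end);
    hi->start = addr;
    hi->end = lo->end;
    hi->offset = lo->offset + (addr - lo->start);
    lo->end = addr;
}

/* clip_end: the caller's entry keeps the low half. */
static void
clip_end(struct ent *entry, struct ent *tmp, unsigned long addr)
{
    split_entry(entry, tmp, addr);
}

/*
 * clip_start: the caller's entry must end up describing the high half,
 * so copy it into tmp first and let tmp play the low-half role.
 */
static void
clip_start(struct ent *entry, struct ent *tmp, unsigned long addr)
{
    memcpy(tmp, entry, sizeof(*tmp));
    split_entry(tmp, entry, addr);
}

int
main(void)
{
    struct ent e = { 0x1000, 0x4000, 0 }, tmp, spare;

    clip_start(&e, &tmp, 0x2000);
    printf("entry now [0x%lx, 0x%lx) off 0x%lx\n", e.start, e.end, e.offset);
    clip_end(&e, &spare, 0x3000);
    printf("entry now [0x%lx, 0x%lx), spare [0x%lx, 0x%lx)\n",
        e.start, e.end, spare.start, spare.end);
    return 0;
}

After both clips the caller's entry covers exactly [0x2000, 0x3000), which is the property the kernel's clippers guarantee without ever changing the entry pointer handed to them.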
4713 uvm_map_uaddr_e(struct vm_map *map, struct vm_map_entry *entry)
4715 return uvm_map_uaddr(map, VMMAP_FREE_START(entry));
4820 struct vm_map_entry *entry;
4846 entry = uvm_map_entrybyaddr(&map->addr, end);
4847 while (entry && entry->fspace < alloc_sz)
4848 entry = RBT_NEXT(uvm_map_addr, entry);
4849 if (entry) {
4850 end = MAX(VMMAP_FREE_START(entry), end);
4873 struct vm_map_entry *entry, *prev, *next;
4876 for (entry = RBT_MIN(uvm_map_addr, &map->addr); entry != NULL;
4877 entry = next) {
4878 next = RBT_NEXT(uvm_map_addr, entry);
4880 free = uvm_map_uaddr_e(map, entry);
4881 uvm_mapent_free_remove(map, free, entry);
4883 if (prev != NULL && entry->start == entry->end) {
4884 prev->fspace += VMMAP_FREE_END(entry) - entry->end;
4885 uvm_mapent_addr_remove(map, entry);
4886 DEAD_ENTRY_PUSH(dead, entry);
4888 prev = entry;
4898 struct vm_map_entry *entry;
4901 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
4902 min = VMMAP_FREE_START(entry);
4903 max = VMMAP_FREE_END(entry);
4904 entry->fspace = 0;
4906 entry = uvm_map_fix_space(map, entry, min, max, flags);
4969 uvm_map_fix_space(struct vm_map *map, struct vm_map_entry *entry,
4975 KASSERT(entry == NULL || (entry->etype & UVM_ET_FREEMAPPED) == 0);
4977 KDASSERT((entry != NULL && VMMAP_FREE_END(entry) == min) ||
4984 * for entry.
4986 entfree = (entry == NULL ? NULL :
4987 uvm_map_uaddr_e(map, entry));
4990 /* Claim guard page for entry. */
4991 if ((map->flags & VM_MAP_GUARDPAGES) && entry != NULL &&
4992 VMMAP_FREE_END(entry) == entry->end &&
4993 entry->start != entry->end) {
5003 entry->guard = 2 * PAGE_SIZE;
5006 entry->guard = PAGE_SIZE;
5013 * Handle the case where entry has a 2-page guard, but the
5014 * space after entry is freed.
5016 if (entry != NULL && entry->fspace == 0 &&
5017 entry->guard > PAGE_SIZE) {
5018 entry->guard = PAGE_SIZE;
5019 min = VMMAP_FREE_START(entry);
5028 * entry is started regardless (otherwise the allocators
5031 if (entry != NULL && free == entfree &&
5034 KDASSERT(VMMAP_FREE_END(entry) == min);
5035 entry->fspace += lmax - min;
5038 * Commit entry to free list: it'll not be added to
5040 * We'll start a new entry and add to that entry
5043 if (entry != NULL)
5044 uvm_mapent_free_insert(map, entfree, entry);
5046 /* New entry for new uaddr. */
5047 entry = uvm_mapent_alloc(map, flags);
5048 KDASSERT(entry != NULL);
5049 entry->end = entry->start = min;
5050 entry->guard = 0;
5051 entry->fspace = lmax - min;
5052 entry->object.uvm_obj = NULL;
5053 entry->offset = 0;
5054 entry->etype = 0;
5055 entry->protection = entry->max_protection = 0;
5056 entry->inheritance = 0;
5057 entry->wired_count = 0;
5058 entry->advice = 0;
5059 entry->aref.ar_pageoff = 0;
5060 entry->aref.ar_amap = NULL;
5061 uvm_mapent_addr_insert(map, entry);
5068 /* Finally put entry on the uaddr state. */
5069 if (entry != NULL)
5070 uvm_mapent_free_insert(map, entfree, entry);
5072 return entry;
5087 struct vm_map_entry *entry, *last;
5113 entry = uvm_map_entrybyaddr(&map->addr, addr);
5115 if (uvm_map_isavail(map, NULL, &entry, &last, addr, sz)) {
5131 * toward the first free page in entry),
5137 if (entry == NULL) {
5142 entry = RBT_MIN(uvm_map_addr, &map->addr);
5143 } else if (VMMAP_FREE_START(entry) <= addr) {
5145 entry = RBT_NEXT(uvm_map_addr, entry);
5148 /* Test if the next entry is sufficient for the allocation. */
5149 for (; entry != NULL;
5150 entry = RBT_NEXT(uvm_map_addr, entry)) {
5151 if (entry->fspace == 0)
5153 addr = VMMAP_FREE_START(entry);
5160 if (addr >= VMMAP_FREE_END(entry))
5165 if (VMMAP_FREE_END(entry) > map->b_end) {
5173 if (VMMAP_FREE_END(entry) > map->s_end) {
5181 if (uvm_map_isavail(map, NULL, &entry, &last, addr, sz)) {
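
The scan above skips entries with no free space, takes VMMAP_FREE_START(entry) as the candidate address and keeps walking until the request fits; the real code additionally respects the brk/stack boundaries and re-checks with uvm_map_isavail. A simplified first-fit walk over an array of entries, assuming the usual free-range macros; all names are illustrative:

#include <stdio.h>

struct ent {
    unsigned long end, guard, fspace;
};

#define FREE_START(e)   ((e)->end + (e)->guard)
#define FREE_END(e)     ((e)->end + (e)->guard + (e)->fspace)

/* Return the first free address with sz bytes of room, or 0 if none. */
static unsigned long
first_fit(struct ent *tab, int n, unsigned long sz)
{
    unsigned long addr;
    int i;

    for (i = 0; i < n; i++) {
        if (tab[i].fspace == 0)     /* nothing free after this entry */
            continue;
        addr = FREE_START(&tab[i]);
        if (addr + sz <= FREE_END(&tab[i]))
            return addr;
    }
    return 0;
}

int
main(void)
{
    struct ent tab[] = {
        { 0x1000, 0, 0x1000 },      /* 4 KiB free: too small */
        { 0x8000, 0, 0x10000 },     /* 64 KiB free */
    };

    printf("%#lx\n", first_fit(tab, 2, 0x8000));    /* 0x8000 */
    return 0;
}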
5365 struct vm_map_entry *entry;
5383 RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
5388 if (start != 0 && entry->start < start)
5390 kve->kve_start = entry->start;
5391 kve->kve_end = entry->end;
5392 kve->kve_guard = entry->guard;
5393 kve->kve_fspace = entry->fspace;
5394 kve->kve_fspace_augment = entry->fspace_augment;
5395 kve->kve_offset = entry->offset;
5396 kve->kve_wired_count = entry->wired_count;
5397 kve->kve_etype = entry->etype;
5398 kve->kve_protection = entry->protection;
5399 kve->kve_max_protection = entry->max_protection;
5400 kve->kve_advice = entry->advice;
5401 kve->kve_inheritance = entry->inheritance;
5402 kve->kve_flags = entry->flags;