Lines Matching +defs:file +defs:start
212 #define UVM_ASSERT(map, cond, file, line) \
213 uvm_tree_assert((map), (cond), #cond, (file), (line))
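
The macro on lines 212-213 is the usual C diagnostic-wrapper pattern: #cond stringizes the asserted expression, and the caller's file and line ride along to uvm_tree_assert() so a failure can name its call site. A minimal standalone sketch of the pattern, with my_assert_func/MY_ASSERT as illustrative stand-ins rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for uvm_tree_assert(); not the kernel function. */
static void
my_assert_func(int cond, const char *expr, const char *file, int line)
{
        if (!cond) {
                fprintf(stderr, "assertion \"%s\" failed at %s:%d\n",
                    expr, file, line);
                abort();
        }
}

/* #cond turns the expression into a string; __FILE__/__LINE__ name the caller. */
#define MY_ASSERT(cond) my_assert_func((cond), #cond, __FILE__, __LINE__)

int
main(void)
{
        int x = 4;

        MY_ASSERT(x == 4);      /* passes silently */
        MY_ASSERT(x == 5);      /* aborts, printing the expression and call site */
        return 0;
}
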
307 * Entries with start == end may only exist if they are the first entry
315 return e1->start < e2->start ? -1 : e1->start > e2->start;
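
Line 315 is the overflow-safe three-way comparison idiom: the ternary yields -1 when the first start address is smaller, and otherwise the boolean e1->start > e2->start evaluates to 1 (greater) or 0 (equal). A small sketch, with struct ent and addrcmp as illustrative names:

#include <assert.h>

struct ent { unsigned long start; };

/* Returns -1, 0 or 1; avoids the wraparound risk of "a->start - b->start". */
static int
addrcmp(const struct ent *a, const struct ent *b)
{
        return a->start < b->start ? -1 : a->start > b->start;
}

int
main(void)
{
        struct ent lo = { 0x1000 }, hi = { 0x2000 };

        assert(addrcmp(&lo, &hi) == -1);
        assert(addrcmp(&hi, &lo) == 1);
        assert(addrcmp(&lo, &lo) == 0);
        return 0;
}
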
412 KDASSERT(entry->start <= entry->end);
413 KDASSERT((entry->start & (vaddr_t)PAGE_MASK) == 0 &&
417 entry->start, entry->end, entry->protection, NULL);
426 entry->start, entry->end, entry->guard, entry->fspace,
427 res, res->start, res->end, res->guard, res->fspace);
440 entry->start, entry->end, entry->protection, NULL);
528 if (iter->start > addr)
612 KDASSERT((*start_ptr)->start <= addr &&
614 (*end_ptr)->start < addr + sz &&
626 if (i->start != i->end && i->end > addr)
866 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
1135 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
1219 if (e1->etype != e2->etype || e1->end != e2->start)
1241 if (e1->offset + (e1->end - e1->start) != e2->offset)
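
Lines 1219 and 1241 are the adjacency tests used when coalescing two map entries: same entry type, the first ends exactly where the second starts, and the backing-object offsets continue without a gap. A simplified sketch of those two checks (struct entry and can_merge are illustrative; the real code also compares objects, protection, inheritance and more):

#include <assert.h>
#include <stdbool.h>

struct entry {
        unsigned long start, end;       /* [start, end) address range */
        unsigned long offset;           /* offset into the backing object */
        int etype;                      /* entry type flags */
};

static bool
can_merge(const struct entry *e1, const struct entry *e2)
{
        if (e1->etype != e2->etype || e1->end != e2->start)
                return false;           /* different kind, or not adjacent */
        if (e1->offset + (e1->end - e1->start) != e2->offset)
                return false;           /* hole in the object offsets */
        return true;
}

int
main(void)
{
        struct entry a = { 0x1000, 0x2000, 0x0000, 0 };
        struct entry b = { 0x2000, 0x3000, 0x1000, 0 };

        assert(can_merge(&a, &b));
        b.offset = 0x2000;              /* offset gap: no longer mergeable */
        assert(!can_merge(&a, &b));
        return 0;
}
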
1355 atop(entry->end - entry->start),
1409 KDASSERT(last->start < addr + sz && VMMAP_FREE_END(last) >= addr + sz);
1429 entry->start = addr;
1449 KDASSERT(last->start == last->end);
1456 if (first->start == addr) {
1567 (*entry)->start <= address && (*entry)->end > address;
1623 ie->ie_start = entry->start;
1676 for (iter = first; iter != NULL && iter->start < end;
1683 printf("prev->start 0x%lx, prev->end 0x%lx, "
1684 "iter->start 0x%lx, iter->end 0x%lx\n",
1685 prev->start, prev->end, iter->start, iter->end);
1689 if (prev != NULL && prev->end != iter->start)
1691 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
1715 vaddr_t start, end;
1722 start = round_page(addr);
1728 if (start == addr)
1729 start += PAGE_SIZE;
1732 if (start < map->min_offset || end >= map->max_offset || end < start)
1742 return uvm_mapanon(map, &start, end - start, 0, flags);
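
Lines 1722-1742 round the requested address to a page boundary before handing the range off; line 1728's test means an already-aligned addr gets bumped a full page. round_page()/trunc_page() are the conventional mask arithmetic; a standalone sketch assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT      12                      /* assumed: 4 KiB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (PAGE_SIZE - 1)

/* Round up or down to a page boundary, as the kernel macros do. */
#define round_page(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page(x)   ((x) & ~PAGE_MASK)

int
main(void)
{
        assert(round_page(0x1001UL) == 0x2000UL);
        assert(round_page(0x2000UL) == 0x2000UL);       /* aligned: unchanged */
        assert(trunc_page(0x2fffUL) == 0x2000UL);
        return 0;
}
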
1784 uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
1788 KASSERT((start & (vaddr_t)PAGE_MASK) == 0 &&
1792 uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
1825 VMMAP_FREE_END(prev) != entry->start)
1829 if (prev == NULL && entry->start == entry->end && markfree) {
1834 addr = entry->start;
1860 uvm_fault_unwire_locked(map, entry->start, entry->end);
1872 uvm_km_pgremove_intrsafe(entry->start, entry->end);
1910 uvm_km_pgremove(entry->object.uvm_obj, entry->start,
1914 pmap_remove(map->pmap, entry->start, entry->end);
1928 * Remove all entries from start to end.
1935 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1941 start = MAX(start, map->min_offset);
1943 if (start >= end)
1949 entry = uvm_map_entrybyaddr(&map->addr, start);
1950 KDASSERT(entry != NULL && entry->start <= start);
1956 if (entry1->end <= start)
1958 for (; entry1 != NULL && entry1->start < end; entry1 = next) {
1959 KDASSERT(entry1->start >= start);
1962 if (entry1->start == entry1->end || UVM_ET_ISHOLE(entry1))
1969 if (entry->end <= start && markfree)
1972 UVM_MAP_CLIP_START(map, entry, start);
1979 for (; entry != NULL && entry->start < end; entry = next) {
1980 KDASSERT(entry->start >= start);
1983 KDASSERT(entry->start >= start && entry->end <= end);
2007 uvmspace_dused(map, entry->start, entry->end);
2010 map->size -= entry->end - entry->start;
2020 for (entry = uvm_map_entrybyaddr(&map->addr, start);
2021 entry != NULL && entry->start < end;
2023 KDASSERT(entry->end <= start ||
2024 entry->start == entry->end ||
2029 for (a = start; a < end; a += PAGE_SIZE)
2049 KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
2054 uvm_fault_unwire_locked(map, iter->start, iter->end);
2087 KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
2088 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2104 iter->start, iter->end);
2121 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2125 error = uvm_fault_wire(map, iter->start, iter->end,
2145 first->start == first->end ||
2152 first->start, first->end);
2159 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2194 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2200 start = trunc_page(start);
2203 if (start > end)
2205 if (start == end)
2207 if (start < map->min_offset)
2219 * Initial test on start is different, because of the different
2222 first = uvm_map_entrybyaddr(&map->addr, start);
2223 if (first->end <= start || UVM_ET_ISHOLE(first)) {
2232 for (last = first; last != NULL && last->start < end;
2269 UVM_MAP_CLIP_START(map, first, start);
2281 uvm_map_pageable_pgon(map, first, tmp, start, end);
2294 UVM_MAP_CLIP_START(map, first, start);
2306 return uvm_map_pageable_wire(map, first, tmp, start, end,
2352 size += iter->end - iter->start;
2553 KASSERT(orig->start < split && VMMAP_FREE_END(orig) > split);
2566 adj = split - orig->start;
2573 next->start = next->end = split;
2583 orig->end = next->start = split;
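
Lines 2553-2583 split one entry at `split`: the original keeps [orig->start, split) and the clone takes [split, old end), which is exactly the single assignment on line 2583. A standalone sketch of the range arithmetic (simplified: the KASSERT on line 2553 actually allows the split point anywhere inside the entry's free extent, up to VMMAP_FREE_END):

#include <assert.h>

struct range { unsigned long start, end; };     /* [start, end) */

/* Split r at `split` into r = [start, split) and out = [split, end). */
static void
range_split(struct range *r, struct range *out, unsigned long split)
{
        assert(r->start < split && split < r->end);
        out->end = r->end;
        r->end = out->start = split;            /* mirrors line 2583 */
}

int
main(void)
{
        struct range r = { 0x1000, 0x5000 }, hi;

        range_split(&r, &hi, 0x3000);
        assert(r.start == 0x1000 && r.end == 0x3000);
        assert(hi.start == 0x3000 && hi.end == 0x5000);
        return 0;
}
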
2627 char *file, int line)
2640 panic("uvm_tree_sanity %p%s (%s %d): %s", map, map_special, file,
2648 uvm_tree_sanity(struct vm_map *map, char *file, int line)
2658 * Valid start, end.
2661 UVM_ASSERT(map, iter->end >= iter->start, file, line);
2662 UVM_ASSERT(map, VMMAP_FREE_END(iter) >= iter->end, file, line);
2665 UVM_ASSERT(map, iter->start < VMMAP_FREE_END(iter),
2666 file, line);
2669 UVM_ASSERT(map, iter->start >= vm_map_min(map) &&
2670 VMMAP_FREE_END(iter) <= vm_map_max(map), file, line);
2673 UVM_ASSERT(map, iter->start == addr, file, line);
2688 file, line);
2695 file, line);
2698 file, line);
2701 UVM_ASSERT(map, addr == vm_map_max(map), file, line);
2705 uvm_tree_size_chk(struct vm_map *map, char *file, int line)
2713 size += iter->end - iter->start;
2718 UVM_ASSERT(map, map->size == size, file, line);
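
uvm_tree_size_chk() (lines 2705-2718) recomputes the mapped total from scratch and asserts that it matches the cached map->size. A sketch of the same bookkeeping check over a plain list (the real walk runs over the address tree and also skips hole entries):

#include <assert.h>
#include <stddef.h>

struct entry {
        unsigned long start, end;
        struct entry *next;
};

struct map {
        struct entry *head;
        unsigned long size;     /* cached total, updated on map/unmap */
};

/* Recompute the total and compare with the cached value. */
static void
map_size_chk(const struct map *m)
{
        const struct entry *iter;
        unsigned long size = 0;

        for (iter = m->head; iter != NULL; iter = iter->next)
                size += iter->end - iter->start;
        assert(m->size == size);
}

int
main(void)
{
        struct entry e2 = { 0x3000, 0x4000, NULL };
        struct entry e1 = { 0x1000, 0x2000, &e2 };
        struct map m = { &e1, 0x2000 };

        map_size_chk(&m);       /* 0x1000 + 0x1000 matches the cached total */
        return 0;
}
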
2744 imin = imax = iter->start;
2865 entry, entry->start, entry->end, entry->object.uvm_obj,
3037 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3048 if (start > end)
3050 start = MAX(start, map->min_offset);
3052 if (start >= end)
3061 * - first will contain first entry at or after start.
3063 first = uvm_map_entrybyaddr(&map->addr, start);
3065 if (first->end <= start)
3069 for (iter = first; iter != NULL && iter->start < end;
3072 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
3082 map, MAX(start, iter->start), MIN(end, iter->end));
3114 for (iter = first; iter != NULL && iter->start < end;
3117 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
3135 UVM_MAP_CLIP_START(map, iter, start);
3156 uvmspace_dused(map, iter->start,
3161 uvmspace_dused(map, iter->start,
3184 pmap_protect(map->pmap, iter->start, iter->end,
3198 if (uvm_map_pageable(map, iter->start, iter->end,
3292 uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
3299 KASSERT((start & (vaddr_t)PAGE_MASK) == 0);
3357 map->min_offset = start;
3370 nvm = uvmspace_alloc(start, end,
3472 if (psrc_entry != NULL && psrc_entry->end != src_entry->start)
3474 if (src_entry->start >= srcaddr + sz)
3485 * srcaddr > map entry start? means we are in the middle of a
3488 if (srcaddr > src_entry->start)
3489 s_off = srcaddr - src_entry->start;
3490 else if (srcaddr == src_entry->start)
3493 panic("uvm_share: map entry start > srcaddr");
3495 remain = src_entry->end - src_entry->start - s_off;
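
Lines 3485-3495 locate the copy start inside the current source entry: s_off is how far past the entry's start srcaddr falls (zero when they coincide), and remain is how much of the entry is left from that point. A worked numeric sketch with hypothetical addresses:

#include <assert.h>

int
main(void)
{
        unsigned long entry_start = 0x2000, entry_end = 0x6000;
        unsigned long srcaddr = 0x3000;         /* hypothetical: mid-entry */
        unsigned long s_off, remain;

        /* Offset of srcaddr within the entry; 0 if it begins exactly there. */
        s_off = srcaddr > entry_start ? srcaddr - entry_start : 0;
        remain = entry_end - entry_start - s_off;

        assert(s_off == 0x1000);
        assert(remain == 0x3000);
        return 0;
}
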
3567 (new_entry->end - new_entry->start) >> PAGE_SHIFT,
3619 new_entry = uvm_mapent_share(new_map, old_entry->start,
3620 old_entry->end - old_entry->start, 0, old_entry->protection,
3641 new_entry = uvm_mapent_clone(new_map, old_entry->start,
3642 old_entry->end - old_entry->start, 0, old_entry->protection,
3723 old_entry->start,
3748 pmap_protect(new_map->pmap, new_entry->start,
3768 new_entry = uvm_mapent_clone(new_map, old_entry->start,
3769 old_entry->end - old_entry->start, 0, old_entry->protection,
3777 atop(new_entry->end - new_entry->start), 0);
3822 if (old_entry->start == old_entry->end)
3857 new_map->size += new_entry->end - new_entry->start;
3861 new_map, new_entry->start, new_entry->end);
3904 * If executable skip first two pages, otherwise start
3952 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3958 if (start > map->max_offset || end > map->max_offset ||
3959 start < map->min_offset || end < map->min_offset)
3964 if (uvm_map_lookup_entry(map, start, &entry)) {
3965 UVM_MAP_CLIP_START(map, entry, start);
3971 entry->start == start && entry->end == end &&
3993 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4000 if (start < map->min_offset || end > map->max_offset || start > end)
4002 if (start == end)
4008 for (entry = uvm_map_entrybyaddr(&map->addr, start);
4009 entry != NULL && entry->start < end;
4079 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
4095 if (start > end)
4097 start = MAX(start, map->min_offset);
4099 if (start >= end)
4104 entry = uvm_map_entrybyaddr(&map->addr, start);
4105 if (entry->end > start)
4106 UVM_MAP_CLIP_START(map, entry, start);
4112 while (entry1 != NULL && entry1->start < end) {
4121 while (entry != NULL && entry->start < end) {
4135 check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4141 map->check_copyin[map->check_copyin_count].start = start;
4154 uvm_map_check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4156 if (start > end)
4158 start = MAX(start, map->min_offset);
4160 if (start >= end)
4163 check_copyin_add(map, start, end);
4175 uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut)
4180 if (start > end)
4182 start = MAX(start, map->min_offset);
4184 if (start >= end)
4189 entry = uvm_map_entrybyaddr(&map->addr, start);
4190 if (entry->end > start)
4191 UVM_MAP_CLIP_START(map, entry, start);
4197 while (entry1 != NULL && entry1->start < end) {
4203 while (entry != NULL && entry->start < end) {
4223 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
4236 if (start > end)
4238 start = MAX(start, map->min_offset);
4240 if (start >= end)
4245 entry = uvm_map_entrybyaddr(&map->addr, start);
4246 if (entry != NULL && entry->end > start)
4247 UVM_MAP_CLIP_START(map, entry, start);
4254 while (entry != NULL && entry->start < end) {
4270 * => start must be page aligned
4277 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
4289 end = start + len;
4296 if ((start & (vaddr_t)PAGE_MASK) != 0 ||
4297 (end & (vaddr_t)PAGE_MASK) != 0 || end < start)
4299 if (start < srcmap->min_offset || end > srcmap->max_offset)
4309 /* Lock srcmap, lookup first and last entry in <start,len>. */
4310 first = uvm_map_entrybyaddr(&srcmap->addr, start);
4329 for (entry = first; entry != NULL && entry->start < end;
4333 UVM_ET_ISSTACK(entry) ? FALSE : TRUE, start, end);
4347 MAX(PAGE_SIZE, PMAP_PREFER_ALIGN()), PMAP_PREFER_OFFSET(start),
4358 /* step 1: start looping through map entries, performing extraction. */
4359 for (entry = first; entry != NULL && entry->start < end;
4366 cp_start = entry->start;
4367 if (cp_start < start) {
4368 cp_off = start - cp_start;
4369 cp_start = start;
4375 cp_start - start + dstaddr, cp_len, cp_off,
4429 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4444 if (start > end || start < map->min_offset || end > map->max_offset)
4448 first = uvm_map_entrybyaddr(&map->addr, start);
4451 for (entry = first; entry != NULL && entry->start < end;
4471 for (entry = first; entry != NULL && entry->start < end;
4492 cp_start = MAX(entry->start, start);
4498 cp_start - entry->start);
4544 cp_start - entry->start);
4556 cp_start = MAX(entry->start, start);
4572 cp_start - entry->start + entry->offset,
4573 cp_end - entry->start + entry->offset, flags);
4593 KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
4621 KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
4644 * Choose free list based on address at start of free space.
4705 * Choose free list based on address at start of free space.
4767 * Update map allocation start and end addresses from proc vmspace.
4883 if (prev != NULL && entry->start == entry->end) {
4913 * Change {a,b}_{start,end} allocation ranges and associated free lists.
4993 entry->start != entry->end) {
5040 * We'll start a new entry and add to that entry
5049 entry->end = entry->start = min;
5195 vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
5226 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5227 uvm_tree_sanity(map, file, line);
5228 uvm_tree_size_chk(map, file, line);
5234 vm_map_lock_ln(struct vm_map *map, char *file, int line)
5268 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5269 uvm_tree_sanity(map, file, line);
5270 uvm_tree_size_chk(map, file, line);
5274 vm_map_lock_read_ln(struct vm_map *map, char *file, int line)
5280 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5281 uvm_tree_sanity(map, file, line);
5282 uvm_tree_size_chk(map, file, line);
5286 vm_map_unlock_ln(struct vm_map *map, char *file, int line)
5289 uvm_tree_sanity(map, file, line);
5290 uvm_tree_size_chk(map, file, line);
5291 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5299 vm_map_unlock_read_ln(struct vm_map *map, char *file, int line)
5301 /* XXX: RO */ uvm_tree_sanity(map, file, line);
5302 /* XXX: RO */ uvm_tree_size_chk(map, file, line);
5303 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5311 vm_map_busy_ln(struct vm_map *map, char *file, int line)
5323 vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
5340 vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
5342 LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
5350 vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
5352 LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
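
Every function in the 5195-5352 range carries the _ln suffix plus a file/line pair, so the LPRINTF tracing and the tree sanity/size checks can report the original call site; callers normally reach them through macros that splice in __FILE__ and __LINE__. A sketch of that wrapper pattern (the macro body here is illustrative, not quoted from the headers):

#include <stdio.h>

struct vm_map;                  /* opaque for this sketch */

static void
vm_map_lock_ln(struct vm_map *map, const char *file, int line)
{
        /* ... acquire the lock, then trace where it was taken ... */
        printf("map lock: %p (at %s %d)\n", (void *)map, file, line);
}

/* Callers use the macro, so every lock records its call site. */
#define vm_map_lock(map)        vm_map_lock_ln((map), __FILE__, __LINE__)

int
main(void)
{
        vm_map_lock(NULL);      /* prints this file and line number */
        return 0;
}
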
5366 vaddr_t start;
5380 start = (vaddr_t)kve[0].kve_start;
5388 if (start != 0 && entry->start < start)
5390 kve->kve_start = entry->start;