Lines Matching +defs:file +defs:end (each hit below is prefixed with its line number in the searched source file)
212 #define UVM_ASSERT(map, cond, file, line) \
213 uvm_tree_assert((map), (cond), #cond, (file), (line))
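The macro at 212-213 stringifies the asserted expression with #cond and forwards the caller's file and line to uvm_tree_assert(), so a failure report names the original call site rather than the assertion helper. A minimal, self-contained sketch of the same pattern; tree_assert() and TREE_ASSERT() are hypothetical stand-ins, not the kernel's names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Backing function: reports the stringified condition and call site. */
    static void
    tree_assert(int cond, const char *condstr, const char *file, int line)
    {
        if (!cond) {
            fprintf(stderr, "assertion \"%s\" failed at %s:%d\n",
                condstr, file, line);
            abort();
        }
    }

    /* #cond captures the expression text; file/line pass straight through. */
    #define TREE_ASSERT(cond, file, line) \
        tree_assert((cond), #cond, (file), (line))

    int
    main(void)
    {
        int x = 4;

        TREE_ASSERT(x == 4, __FILE__, __LINE__);    /* passes silently */
        TREE_ASSERT(x == 5, __FILE__, __LINE__);    /* reports and aborts */
        return 0;
    }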
274 * This global represents the end of the kernel virtual address
307 * Entries with start == end may only exist if they are the first entry
412 KDASSERT(entry->start <= entry->end);
414 (entry->end & (vaddr_t)PAGE_MASK) == 0);
417 entry->start, entry->end, entry->protection, NULL);
426 entry->start, entry->end, entry->guard, entry->fspace,
427 res, res->start, res->end, res->guard, res->fspace);
440 entry->start, entry->end, entry->protection, NULL);
626 if (i->start != i->end && i->end > addr)
866 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
1135 KDASSERT(entry->start == *addr && entry->end == *addr + sz);
1219 if (e1->etype != e2->etype || e1->end != e2->start)
1241 if (e1->offset + (e1->end - e1->start) != e2->offset)
1292 e1->end = e2->end;
1355 atop(entry->end - entry->start),
1408 KDASSERT(first->end <= addr && VMMAP_FREE_END(first) > addr);
1430 entry->end = min;
1449 KDASSERT(last->start == last->end);
1567 (*entry)->start <= address && (*entry)->end > address;
1624 ie->ie_end = entry->end;
1665 vaddr_t end = addr + sz;
1676 for (iter = first; iter != NULL && iter->start < end;
1683 printf("prev->start 0x%lx, prev->end 0x%lx, "
1684 "iter->start 0x%lx, iter->end 0x%lx\n",
1685 prev->start, prev->end, iter->start, iter->end);
1689 if (prev != NULL && prev->end != iter->start)
1691 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
1715 vaddr_t start, end;
1723 end = trunc_page(addr + sz);
1725 if (end == addr + sz)
1726 end -= PAGE_SIZE;
1732 if (start < map->min_offset || end >= map->max_offset || end < start)
1742 return uvm_mapanon(map, &start, end - start, 0, flags);
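Lines 1715-1742 build a page-aligned range before calling uvm_mapanon(): trunc_page() strips the sub-page bits, and the end < start comparison at 1732 catches wraparound when addr + sz overflows. A sketch of the BSD-style rounding macros these hits rely on; the 4 KiB page size is an assumption:

    typedef unsigned long vaddr_t;

    #define PAGE_SHIFT  12                  /* assumed: 4 KiB pages */
    #define PAGE_SIZE   ((vaddr_t)1 << PAGE_SHIFT)
    #define PAGE_MASK   (PAGE_SIZE - 1)

    /* Round down/up to a page boundary by masking the low bits. */
    #define trunc_page(x)   ((x) & ~PAGE_MASK)
    #define round_page(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)

    int
    main(void)
    {
        vaddr_t a = 0x1234;

        /* trunc_page(0x1234) == 0x1000, round_page(0x1234) == 0x2000 */
        return (trunc_page(a) == 0x1000 && round_page(a) == 0x2000) ? 0 : 1;
    }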
1784 uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
1789 (end & (vaddr_t)PAGE_MASK) == 0);
1792 uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
1816 vaddr_t end; /* End of freed range. */
1829 if (prev == NULL && entry->start == entry->end && markfree) {
1835 end = VMMAP_FREE_END(entry);
1846 *prev_ptr = uvm_map_fix_space(map, prev, addr, end, 0);
1860 uvm_fault_unwire_locked(map, entry->start, entry->end);
1872 uvm_km_pgremove_intrsafe(entry->start, entry->end);
1911 entry->end);
1914 pmap_remove(map->pmap, entry->start, entry->end);
1928 * Remove all entries from start to end.
1935 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1942 end = MIN(end, map->max_offset);
1943 if (start >= end)
1956 if (entry1->end <= start)
1958 for (; entry1 != NULL && entry1->start < end; entry1 = next) {
1962 if (entry1->start == entry1->end || UVM_ET_ISHOLE(entry1))
1969 if (entry->end <= start && markfree)
1975 * Iterate entries until we reach end address.
1979 for (; entry != NULL && entry->start < end; entry = next) {
1981 if (entry->end > end || !markfree)
1982 UVM_MAP_CLIP_END(map, entry, end);
1983 KDASSERT(entry->start >= start && entry->end <= end);
2007 uvmspace_dused(map, entry->start, entry->end);
2010 map->size -= entry->end - entry->start;
2021 entry != NULL && entry->start < end;
2023 KDASSERT(entry->end <= start ||
2024 entry->start == entry->end ||
2029 for (a = start; a < end; a += PAGE_SIZE)
2037 * Mark all entries from first until end (exclusive) as pageable.
2043 struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr)
2047 for (iter = first; iter != end;
2049 KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
2054 uvm_fault_unwire_locked(map, iter->start, iter->end);
2059 * Mark all entries from first until end (exclusive) as wired.
2066 struct vm_map_entry *end, vaddr_t start_addr, vaddr_t end_addr,
2085 for (iter = first; iter != end;
2087 KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
2088 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2104 iter->start, iter->end);
2119 for (iter = first; error == 0 && iter != end;
2121 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2125 error = uvm_fault_wire(map, iter->start, iter->end,
2145 first->start == first->end ||
2152 first->start, first->end);
2157 for (; iter != end;
2159 if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
2194 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
2201 end = round_page(end);
2203 if (start > end)
2205 if (start == end)
2209 if (end > map->max_offset)
2223 if (first->end <= start || UVM_ET_ISHOLE(first)) {
2232 for (last = first; last != NULL && last->start < end;
2235 (last->end < end && VMMAP_FREE_END(last) != last->end)) {
2253 if (last->end < end) {
2271 * Split last at end.
2276 UVM_MAP_CLIP_END(map, last, end);
2281 uvm_map_pageable_pgon(map, first, tmp, start, end);
2296 * Split last at end.
2301 UVM_MAP_CLIP_END(map, last, end);
2306 return uvm_map_pageable_wire(map, first, tmp, start, end,
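uvm_map_pageable_wire() (hits 2066-2159) works in two passes over [first, end): the first tries uvm_fault_wire() on each eligible entry (2119-2125), and if any step fails, a second pass starts over from the head and unwires everything that had already succeeded (2145-2159). A minimal sketch of that apply-then-unwind shape; the list layout and wire_one()/unwire_one() are hypothetical:

    #include <stddef.h>

    struct ent { struct ent *next; };

    static int
    wire_one(struct ent *e)
    {
        (void)e;
        return 0;       /* pretend success; nonzero would signal failure */
    }

    static void
    unwire_one(struct ent *e)
    {
        (void)e;
    }

    /* Wire every entry in [first, end); on error, unwind the wired prefix. */
    static int
    wire_range(struct ent *first, struct ent *end)
    {
        struct ent *iter, *undo;
        int error;

        for (iter = first; iter != end; iter = iter->next) {
            if ((error = wire_one(iter)) != 0) {
                /* second pass: undo every entry before the failure */
                for (undo = first; undo != iter; undo = undo->next)
                    unwire_one(undo);
                return error;
            }
        }
        return 0;
    }

    int
    main(void)
    {
        struct ent c = { NULL }, b = { &c }, a = { &b };

        return wire_range(&a, NULL);    /* wires a, b, then c */
    }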
2352 size += iter->end - iter->start;
2484 * The set [entry, end] is also referred to as the wavefront.
2569 if (split >= orig->end) {
2573 next->start = next->end = split;
2578 orig->guard = MIN(orig->guard, split - orig->end);
2583 orig->end = next->start = split;
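The splitentry hits (2569-2583) divide an entry at split: in the common case the original keeps [start, split) and the new entry takes [split, end) (2583), while a split at or beyond orig->end yields an empty next entry (2573) and trims the original's guard instead (2578). A sketch of the common case, using a hypothetical entry type:

    #include <stdio.h>

    typedef unsigned long vaddr_t;

    struct entry {
        vaddr_t start, end;     /* covers the range [start, end) */
    };

    /* Common case: *orig keeps [start, split), *next takes [split, end). */
    static void
    split_entry(struct entry *orig, struct entry *next, vaddr_t split)
    {
        next->end = orig->end;
        orig->end = next->start = split;    /* cf. line 2583 */
    }

    int
    main(void)
    {
        struct entry next, orig = { 0x1000, 0x5000 };

        split_entry(&orig, &next, 0x3000);
        printf("[%#lx,%#lx) and [%#lx,%#lx)\n",
            orig.start, orig.end, next.start, next.end);
        return 0;
    }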
2627 char *file, int line)
2640 panic("uvm_tree_sanity %p%s (%s %d): %s", map, map_special, file,
2648 uvm_tree_sanity(struct vm_map *map, char *file, int line)
2658 * Valid start, end.
2659 * Catch overflow for end+fspace.
2661 UVM_ASSERT(map, iter->end >= iter->start, file, line);
2662 UVM_ASSERT(map, VMMAP_FREE_END(iter) >= iter->end, file, line);
2666 file, line);
2670 VMMAP_FREE_END(iter) <= vm_map_max(map), file, line);
2673 UVM_ASSERT(map, iter->start == addr, file, line);
2688 file, line);
2695 file, line);
2698 file, line);
2701 UVM_ASSERT(map, addr == vm_map_max(map), file, line);
2705 uvm_tree_size_chk(struct vm_map *map, char *file, int line)
2713 size += iter->end - iter->start;
2718 UVM_ASSERT(map, map->size == size, file, line);
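uvm_tree_sanity() (2648-2701) walks the entries in address order and asserts the map's structural invariants: end >= start, the free range does not wrap (VMMAP_FREE_END(iter) >= iter->end), every entry stays inside [vm_map_min, vm_map_max], consecutive entries leave neither gaps nor overlaps (iter->start == addr), and the walk lands exactly on vm_map_max; uvm_tree_size_chk() (2705-2718) separately sums end - start and compares the total with the cached map->size. A simplified sketch of such an invariant walk, omitting the guard/fspace bookkeeping; the linked-list layout is hypothetical:

    #include <stddef.h>

    typedef unsigned long vaddr_t;

    struct entry {
        vaddr_t start, end;
        struct entry *next;     /* entries kept sorted by address */
    };

    /* Returns 1 iff the sorted entries tile [map_min, map_max) exactly. */
    static int
    check_entries(const struct entry *head, vaddr_t map_min, vaddr_t map_max)
    {
        vaddr_t addr = map_min;
        const struct entry *e;

        for (e = head; e != NULL; e = e->next) {
            if (e->end < e->start)      /* valid start, end */
                return 0;
            if (e->start != addr)       /* gap or overlap */
                return 0;
            if (e->end > map_max)       /* escapes the map */
                return 0;
            addr = e->end;
        }
        return addr == map_max;         /* full coverage */
    }

    int
    main(void)
    {
        struct entry b = { 0x3000, 0x8000, NULL };
        struct entry a = { 0x1000, 0x3000, &b };

        return check_entries(&a, 0x1000, 0x8000) ? 0 : 1;
    }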
2755 while (imin != iter->end) {
2760 imax = iter->end;
2865 entry, entry->start, entry->end, entry->object.uvm_obj,
3037 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3048 if (start > end)
3051 end = MIN(end, map->max_offset);
3052 if (start >= end)
3065 if (first->end <= start)
3069 for (iter = first; iter != NULL && iter->start < end;
3072 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
3082 map, MAX(start, iter->start), MIN(end, iter->end));
3114 for (iter = first; iter != NULL && iter->start < end;
3117 if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
3136 UVM_MAP_CLIP_END(map, iter, end);
3157 iter->end);
3162 iter->end);
3184 pmap_protect(map->pmap, iter->start, iter->end,
3198 if (uvm_map_pageable(map, iter->start, iter->end,
3292 uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
3300 KASSERT((end & (vaddr_t)PAGE_MASK) == 0 ||
3301 (end & (vaddr_t)PAGE_MASK) == (vaddr_t)PAGE_MASK);
3350 if (end & (vaddr_t)PAGE_MASK) {
3351 end += 1;
3352 if (end == 0) /* overflow */
3353 end -= PAGE_SIZE;
3358 map->max_offset = end;
3370 nvm = uvmspace_alloc(start, end,
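The rounding at 3350-3353 leans on the KASSERT at 3300-3301: end arrives either page-aligned or pointing at the last byte of a page (end & PAGE_MASK == PAGE_MASK), so a single end += 1 reaches the next page boundary, and if that increment wraps to 0 at the very top of the address space the code backs off one page. The arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    typedef unsigned long vaddr_t;

    #define PAGE_SIZE   0x1000UL        /* assumed: 4 KiB pages */
    #define PAGE_MASK   (PAGE_SIZE - 1)

    int
    main(void)
    {
        vaddr_t end = 0x7fff6fffUL;     /* last byte of a page */

        if (end & PAGE_MASK) {
            end += 1;                   /* -> 0x7fff7000, page-aligned */
            if (end == 0)               /* wrapped past the top of the VA */
                end -= PAGE_SIZE;       /* clamp to the highest boundary */
        }
        printf("end = %#lx\n", end);    /* prints end = 0x7fff7000 */
        return 0;
    }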
3472 if (psrc_entry != NULL && psrc_entry->end != src_entry->start)
3495 remain = src_entry->end - src_entry->start - s_off;
3567 (new_entry->end - new_entry->start) >> PAGE_SHIFT,
3620 old_entry->end - old_entry->start, 0, old_entry->protection,
3642 old_entry->end - old_entry->start, 0, old_entry->protection,
3662 * we will end up in a situation where both the
3724 old_entry->end,
3749 new_entry->end,
3769 old_entry->end - old_entry->start, 0, old_entry->protection,
3777 atop(new_entry->end - new_entry->start), 0);
3822 if (old_entry->start == old_entry->end)
3857 new_map->size += new_entry->end - new_entry->start;
3861 new_map, new_entry->start, new_entry->end);
3952 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3958 if (start > map->max_offset || end > map->max_offset ||
3959 start < map->min_offset || end < map->min_offset)
3966 UVM_MAP_CLIP_END(map, entry, end);
3971 entry->start == start && entry->end == end &&
3993 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4000 if (start < map->min_offset || end > map->max_offset || start > end)
4002 if (start == end)
4009 entry != NULL && entry->start < end;
4013 (entry->end < end && entry->end != VMMAP_FREE_END(entry)))
4079 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
4095 if (start > end)
4098 end = MIN(end, map->max_offset);
4099 if (start >= end)
4105 if (entry->end > start)
4112 while (entry1 != NULL && entry1->start < end) {
4121 while (entry != NULL && entry->start < end) {
4122 UVM_MAP_CLIP_END(map, entry, end);
4135 check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4142 map->check_copyin[map->check_copyin_count].end = end;
4154 uvm_map_check_copyin_add(struct vm_map *map, vaddr_t start, vaddr_t end)
4156 if (start > end)
4159 end = MIN(end, map->max_offset);
4160 if (start >= end)
4163 check_copyin_add(map, start, end);
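The prologue at 4156-4160 repeats a validation idiom that recurs throughout these hits (1942-1943, 3048-3052, 4095-4099, 4180-4184, 4236-4240): reject a reversed range, clamp end to the map's upper bound, and treat a range that is empty after clamping as a successful no-op. A hedged sketch of the idiom as a helper; the function and its return convention are hypothetical:

    #include <errno.h>

    typedef unsigned long vaddr_t;

    struct map { vaddr_t min_offset, max_offset; };

    /*
     * Validate and clamp [start, *endp). Returns EINVAL for a reversed
     * range; otherwise returns 0 and sets *done when nothing remains.
     */
    static int
    range_prologue(const struct map *m, vaddr_t start, vaddr_t *endp,
        int *done)
    {
        if (start > *endp)
            return EINVAL;
        if (*endp > m->max_offset)      /* end = MIN(end, map->max_offset) */
            *endp = m->max_offset;
        *done = (start >= *endp);       /* empty after clamping: no-op */
        return 0;
    }

    int
    main(void)
    {
        struct map m = { 0x1000, 0x8000 };
        vaddr_t end = 0x9000;
        int done;

        return range_prologue(&m, 0x2000, &end, &done);  /* end -> 0x8000 */
    }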
4175 uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut)
4180 if (start > end)
4183 end = MIN(end, map->max_offset);
4184 if (start >= end)
4190 if (entry->end > start)
4197 while (entry1 != NULL && entry1->start < end) {
4203 while (entry != NULL && entry->start < end) {
4204 UVM_MAP_CLIP_END(map, entry, end);
4223 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
4236 if (start > end)
4239 end = MIN(end, map->max_offset);
4240 if (start >= end)
4246 if (entry != NULL && entry->end > start)
4254 while (entry != NULL && entry->start < end) {
4255 UVM_MAP_CLIP_END(map, entry, end);
4283 vaddr_t end;
4289 end = start + len;
4297 (end & (vaddr_t)PAGE_MASK) != 0 || end < start)
4299 if (start < srcmap->min_offset || end > srcmap->max_offset)
4313 for (entry = first; entry != NULL && entry->end < end;
4315 if (VMMAP_FREE_END(entry) != entry->end ||
4329 for (entry = first; entry != NULL && entry->start < end;
4333 UVM_ET_ISSTACK(entry) ? FALSE : TRUE, start, end);
4359 for (entry = first; entry != NULL && entry->start < end;
4372 cp_len = MIN(entry->end, end) - cp_start;
4429 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4444 if (start > end || start < map->min_offset || end > map->max_offset)
4451 for (entry = first; entry != NULL && entry->start < end;
4461 (entry->end < end &&
4462 VMMAP_FREE_END(entry) != entry->end)) {
4471 for (entry = first; entry != NULL && entry->start < end;
4493 cp_end = MIN(entry->end, end);
4557 cp_end = MIN(entry->end, end);
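uvm_map_clean() (4429-4557), like the protect and extract paths (3082, 4372), operates on the intersection of the request with each entry by clamping both ends: MAX(start, entry->start) and MIN(entry->end, end). A small worked example of that interval intersection:

    #include <stdio.h>

    typedef unsigned long vaddr_t;

    #define MAX(a, b)   ((a) > (b) ? (a) : (b))
    #define MIN(a, b)   ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
        vaddr_t start = 0x2000, end = 0x6000;       /* requested range */
        vaddr_t e_start = 0x5000, e_end = 0x9000;   /* one map entry */
        vaddr_t cp_start = MAX(start, e_start);
        vaddr_t cp_end = MIN(end, e_end);

        if (cp_start < cp_end)      /* overlap is [0x5000, 0x6000) */
            printf("operate on [%#lx, %#lx)\n", cp_start, cp_end);
        return 0;
    }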
4767 * Update map allocation start and end addresses from proc vmspace.
4819 vaddr_t end;
4845 end = MAX(uvm_maxkaddr, map->min_offset);
4846 entry = uvm_map_entrybyaddr(&map->addr, end);
4850 end = MAX(VMMAP_FREE_START(entry), end);
4851 end += MIN(sz, map->max_offset - end);
4853 end = map->max_offset;
4857 uvm_maxkaddr = pmap_growkernel(end);
4859 uvm_maxkaddr = MAX(uvm_maxkaddr, end);
4883 if (prev != NULL && entry->start == entry->end) {
4884 prev->fspace += VMMAP_FREE_END(entry) - entry->end;
4913 * Change {a,b}_{start,end} allocation ranges and associated free lists.
4992 VMMAP_FREE_END(entry) == entry->end &&
4993 entry->start != entry->end) {
5049 entry->end = entry->start = min;
5195 vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
5226 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5227 uvm_tree_sanity(map, file, line);
5228 uvm_tree_size_chk(map, file, line);
5234 vm_map_lock_ln(struct vm_map *map, char *file, int line)
5268 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5269 uvm_tree_sanity(map, file, line);
5270 uvm_tree_size_chk(map, file, line);
5274 vm_map_lock_read_ln(struct vm_map *map, char *file, int line)
5280 LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
5281 uvm_tree_sanity(map, file, line);
5282 uvm_tree_size_chk(map, file, line);
5286 vm_map_unlock_ln(struct vm_map *map, char *file, int line)
5289 uvm_tree_sanity(map, file, line);
5290 uvm_tree_size_chk(map, file, line);
5291 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5299 vm_map_unlock_read_ln(struct vm_map *map, char *file, int line)
5301 /* XXX: RO */ uvm_tree_sanity(map, file, line);
5302 /* XXX: RO */ uvm_tree_size_chk(map, file, line);
5303 LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
5311 vm_map_busy_ln(struct vm_map *map, char *file, int line)
5323 vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
5340 vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
5342 LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
5350 vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
5352 LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
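Every locking entry point from 5195 onward carries an _ln suffix plus a file/line pair and brackets the lock operation with uvm_tree_sanity()/uvm_tree_size_chk(). The pair is presumably supplied by short-name wrapper macros that expand __FILE__ and __LINE__ at the call site; a sketch of that convention with hypothetical names:

    #include <stdio.h>

    struct map;                     /* opaque stand-in */

    static void
    map_lock_ln(struct map *m, const char *file, int line)
    {
        /* ...acquire the lock, then run consistency checks... */
        printf("map lock: %p (at %s %d)\n", (void *)m, file, line);
    }

    /* Callers use the short name; the macro records the lock site. */
    #define map_lock(m) map_lock_ln((m), __FILE__, __LINE__)

    int
    main(void)
    {
        map_lock(NULL);             /* logs this file and line */
        return 0;
    }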
5391 kve->kve_end = entry->end;