1 /*	$NetBSD: uvm_map.c,v 1.413 2024/07/16 16:48:54 uwe Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993, The Regents of the University of California.
6  *
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
37  * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
38  *
39  *
40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 
64 /*
65  * uvm_map.c: uvm map operations
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.413 2024/07/16 16:48:54 uwe Exp $");
70 
71 #include "opt_ddb.h"
72 #include "opt_pax.h"
73 #include "opt_uvmhist.h"
74 #include "opt_uvm.h"
75 #include "opt_sysv.h"
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/mman.h>
80 #include <sys/proc.h>
81 #include <sys/pool.h>
82 #include <sys/kernel.h>
83 #include <sys/mount.h>
84 #include <sys/pax.h>
85 #include <sys/vnode.h>
86 #include <sys/filedesc.h>
87 #include <sys/lockdebug.h>
88 #include <sys/atomic.h>
89 #include <sys/sysctl.h>
90 #ifndef __USER_VA0_IS_SAFE
91 #include <sys/kauth.h>
92 #include "opt_user_va0_disable_default.h"
93 #endif
94 
95 #include <sys/shm.h>
96 
97 #include <uvm/uvm.h>
98 #include <uvm/uvm_readahead.h>
99 
100 #if defined(DDB) || defined(DEBUGPRINT)
101 #include <uvm/uvm_ddb.h>
102 #endif
103 
104 #ifdef UVMHIST
105 #ifndef UVMHIST_MAPHIST_SIZE
106 #define UVMHIST_MAPHIST_SIZE 100
107 #endif
108 static struct kern_history_ent maphistbuf[UVMHIST_MAPHIST_SIZE];
109 UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
110 #endif
111 
112 #if !defined(UVMMAP_COUNTERS)
113 
114 #define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
115 #define UVMMAP_EVCNT_INCR(ev)		/* nothing */
116 #define UVMMAP_EVCNT_DECR(ev)		/* nothing */
117 
118 #else /* defined(UVMMAP_COUNTERS) */
119 
120 #include <sys/evcnt.h>
121 #define	UVMMAP_EVCNT_DEFINE(name) \
122 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
123     "uvmmap", #name); \
124 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
125 #define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
126 #define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--
127 
128 #endif /* defined(UVMMAP_COUNTERS) */
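
/*
 * Illustrative note (not in the original source): with UVMMAP_COUNTERS
 * enabled, UVMMAP_EVCNT_INCR(ubackmerge) expands to
 *
 *	uvmmap_evcnt_ubackmerge.ev_count++;
 *
 * and the statically attached counter is expected to show up as
 * "uvmmap ubackmerge" in vmstat -e output.
 */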
129 
130 UVMMAP_EVCNT_DEFINE(ubackmerge)
131 UVMMAP_EVCNT_DEFINE(uforwmerge)
132 UVMMAP_EVCNT_DEFINE(ubimerge)
133 UVMMAP_EVCNT_DEFINE(unomerge)
134 UVMMAP_EVCNT_DEFINE(kbackmerge)
135 UVMMAP_EVCNT_DEFINE(kforwmerge)
136 UVMMAP_EVCNT_DEFINE(kbimerge)
137 UVMMAP_EVCNT_DEFINE(knomerge)
138 UVMMAP_EVCNT_DEFINE(map_call)
139 UVMMAP_EVCNT_DEFINE(mlk_call)
140 UVMMAP_EVCNT_DEFINE(mlk_hint)
141 UVMMAP_EVCNT_DEFINE(mlk_tree)
142 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
143 
144 const char vmmapbsy[] = "vmmapbsy";
145 
146 /*
147  * cache for dynamically-allocated map entries.
148  */
149 
150 static struct pool_cache uvm_map_entry_cache;
151 
152 #ifdef PMAP_GROWKERNEL
153 /*
154  * This global represents the end of the kernel virtual address
155  * space.  If we want to exceed this, we must grow the kernel
156  * virtual address space dynamically.
157  *
158  * Note, this variable is locked by kernel_map's lock.
159  */
160 vaddr_t uvm_maxkaddr;
161 #endif
162 
163 #ifndef __USER_VA0_IS_SAFE
164 #ifndef __USER_VA0_DISABLE_DEFAULT
165 #define __USER_VA0_DISABLE_DEFAULT 1
166 #endif
167 #ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
168 #undef __USER_VA0_DISABLE_DEFAULT
169 #define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
170 #endif
171 int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
172 #endif
173 
174 /*
175  * macros
176  */
177 
178 /*
179  * uvm_map_align_va: round the virtual address down or up to the alignment
180  */
181 static __inline void
182 uvm_map_align_va(vaddr_t *vap, vsize_t align, int topdown)
183 {
184 
185 	KASSERT(powerof2(align));
186 
187 	if (align != 0 && (*vap & (align - 1)) != 0) {
188 		if (topdown)
189 			*vap = rounddown2(*vap, align);
190 		else
191 			*vap = roundup2(*vap, align);
192 	}
193 }
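
/*
 * Worked example (values invented for illustration): with
 * align = 0x10000, a hint of 0x12345 becomes
 *
 *	rounddown2(0x12345, 0x10000) == 0x10000		(topdown)
 *	roundup2(0x12345, 0x10000)   == 0x20000		(bottom-up)
 *
 * and an already-aligned address is left untouched.
 */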
194 
195 /*
196  * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
197  */
198 extern struct vm_map *pager_map;
199 
200 #define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
201     prot, maxprot, inh, adv, wire) \
202 	((ent)->etype == (type) && \
203 	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
204 	(ent)->object.uvm_obj == (uobj) && \
205 	(ent)->protection == (prot) && \
206 	(ent)->max_protection == (maxprot) && \
207 	(ent)->inheritance == (inh) && \
208 	(ent)->advice == (adv) && \
209 	(ent)->wired_count == (wire))
210 
211 /*
212  * uvm_map_entry_link: insert entry into a map
213  *
214  * => map must be locked
215  */
216 #define uvm_map_entry_link(map, after_where, entry) do { \
217 	uvm_mapent_check(entry); \
218 	(map)->nentries++; \
219 	(entry)->prev = (after_where); \
220 	(entry)->next = (after_where)->next; \
221 	(entry)->prev->next = (entry); \
222 	(entry)->next->prev = (entry); \
223 	uvm_rb_insert((map), (entry)); \
224 } while (/*CONSTCOND*/ 0)
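
/*
 * Illustrative sketch (entries invented): linking a new entry E
 * after A in the list A <-> B yields A <-> E <-> B, with the map
 * header acting as the sentinel at both ends; the same operation
 * also inserts E into the map's red-black tree via uvm_rb_insert().
 */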
225 
226 /*
227  * uvm_map_entry_unlink: remove entry from a map
228  *
229  * => map must be locked
230  */
231 #define uvm_map_entry_unlink(map, entry) do { \
232 	KASSERT((entry) != (map)->first_free); \
233 	KASSERT((entry) != (map)->hint); \
234 	uvm_mapent_check(entry); \
235 	(map)->nentries--; \
236 	(entry)->next->prev = (entry)->prev; \
237 	(entry)->prev->next = (entry)->next; \
238 	uvm_rb_remove((map), (entry)); \
239 } while (/*CONSTCOND*/ 0)
240 
241 /*
242  * SAVE_HINT: saves the specified entry as the hint for future lookups.
243  *
244  * => map need not be locked.
245  */
246 #define SAVE_HINT(map, check, value) do { \
247 	if ((map)->hint == (check)) \
248 		(map)->hint = (value); \
249 } while (/*CONSTCOND*/ 0)
250 
251 /*
252  * clear_hints: ensure that hints don't point to the entry.
253  *
254  * => map must be write-locked.
255  */
256 static void
257 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
258 {
259 
260 	SAVE_HINT(map, ent, ent->prev);
261 	if (map->first_free == ent) {
262 		map->first_free = ent->prev;
263 	}
264 }
265 
266 /*
267  * VM_MAP_RANGE_CHECK: check and correct range
268  *
269  * => map must at least be read locked
270  */
271 
272 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
273 	if (start < vm_map_min(map))		\
274 		start = vm_map_min(map);	\
275 	if (end > vm_map_max(map))		\
276 		end = vm_map_max(map);		\
277 	if (start > end)			\
278 		start = end;			\
279 } while (/*CONSTCOND*/ 0)
280 
281 /*
282  * local prototypes
283  */
284 
285 static struct vm_map_entry *
286 		uvm_mapent_alloc(struct vm_map *, int);
287 static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
288 static void	uvm_mapent_free(struct vm_map_entry *);
289 #if defined(DEBUG)
290 static void	_uvm_mapent_check(const struct vm_map_entry *, int);
291 #define	uvm_mapent_check(e)	_uvm_mapent_check(e, __LINE__)
292 #else /* defined(DEBUG) */
293 #define	uvm_mapent_check(e)	/* nothing */
294 #endif /* defined(DEBUG) */
295 
296 static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
297 static void	uvm_map_reference_amap(struct vm_map_entry *, int);
298 static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
299 		    int, struct vm_map_entry *);
300 static void	uvm_map_unreference_amap(struct vm_map_entry *, int);
301 
302 int _uvm_map_sanity(struct vm_map *);
303 int _uvm_tree_sanity(struct vm_map *);
304 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
305 
306 #define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
307 #define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
308 #define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
309 #define	PARENT_ENTRY(map, entry) \
310 	(ROOT_ENTRY(map) == (entry) \
311 	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
312 
313 /*
314  * These get filled in if/when SYSVSHM shared memory code is loaded
315  *
316  * We do this with function pointers rather than #ifdef SYSVSHM so that the
317  * SYSVSHM code can be loaded and unloaded
318  */
319 void (*uvm_shmexit)(struct vmspace *) = NULL;
320 void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;
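
/*
 * Hedged sketch (an assumption about the SYSVSHM module, not taken
 * from this file): the module is expected to install its handlers
 * when it attaches and clear them when it detaches, roughly:
 *
 *	uvm_shmexit = shmexit;
 *	uvm_shmfork = shmfork;
 */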
321 
322 static int
323 uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
324 {
325 	const struct vm_map_entry *eparent = nparent;
326 	const struct vm_map_entry *ekey = nkey;
327 
328 	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
329 	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
330 
331 	if (eparent->start < ekey->start)
332 		return -1;
333 	if (eparent->end >= ekey->start)
334 		return 1;
335 	return 0;
336 }
337 
338 static int
339 uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
340 {
341 	const struct vm_map_entry *eparent = nparent;
342 	const vaddr_t va = *(const vaddr_t *) vkey;
343 
344 	if (eparent->start < va)
345 		return -1;
346 	if (eparent->end >= va)
347 		return 1;
348 	return 0;
349 }
350 
351 static const rb_tree_ops_t uvm_map_tree_ops = {
352 	.rbto_compare_nodes = uvm_map_compare_nodes,
353 	.rbto_compare_key = uvm_map_compare_key,
354 	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
355 	.rbto_context = NULL
356 };
357 
358 /*
359  * uvm_rb_gap: return the gap size between our entry and next entry.
360  */
361 static inline vsize_t
362 uvm_rb_gap(const struct vm_map_entry *entry)
363 {
364 
365 	KASSERT(entry->next != NULL);
366 	return entry->next->start - entry->end;
367 }
368 
369 static vsize_t
370 uvm_rb_maxgap(const struct vm_map_entry *entry)
371 {
372 	struct vm_map_entry *child;
373 	vsize_t maxgap = entry->gap;
374 
375 	/*
376 	 * We need maxgap to be the largest gap of us or any of our
377 	 * descendants.  Since each child's maxgap is the cached value
378 	 * of the largest gap of that child and its descendants, we can
379 	 * just use that value and avoid recursing down the tree to
380 	 * calculate it.
381 	 */
382 	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
383 		maxgap = child->maxgap;
384 
385 	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
386 		maxgap = child->maxgap;
387 
388 	return maxgap;
389 }
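
/*
 * Illustrative sketch (values invented): if an entry's own gap is
 * 0x1000, its left child caches maxgap 0x4000 and its right child
 * caches maxgap 0x2000, then uvm_rb_maxgap() returns 0x4000 -- the
 * largest free gap anywhere in that subtree -- without visiting
 * any grandchildren.
 */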
390 
391 static void
392 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
393 {
394 	struct vm_map_entry *parent;
395 
396 	KASSERT(entry->gap == uvm_rb_gap(entry));
397 	entry->maxgap = uvm_rb_maxgap(entry);
398 
399 	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
400 		struct vm_map_entry *brother;
401 		vsize_t maxgap = parent->gap;
402 		unsigned int which;
403 
404 		KDASSERT(parent->gap == uvm_rb_gap(parent));
405 		if (maxgap < entry->maxgap)
406 			maxgap = entry->maxgap;
407 		/*
408 		 * Since we work towards the root, we know entry's maxgap
409 		 * value is OK, but its brother's may now be out-of-date
410 		 * due to rebalancing.  So refresh it.
411 		 */
412 		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
413 		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
414 		if (brother != NULL) {
415 			KDASSERT(brother->gap == uvm_rb_gap(brother));
416 			brother->maxgap = uvm_rb_maxgap(brother);
417 			if (maxgap < brother->maxgap)
418 				maxgap = brother->maxgap;
419 		}
420 
421 		parent->maxgap = maxgap;
422 		entry = parent;
423 	}
424 }
425 
426 static void
427 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
428 {
429 	struct vm_map_entry *ret __diagused;
430 
431 	entry->gap = entry->maxgap = uvm_rb_gap(entry);
432 	if (entry->prev != &map->header)
433 		entry->prev->gap = uvm_rb_gap(entry->prev);
434 
435 	ret = rb_tree_insert_node(&map->rb_tree, entry);
436 	KASSERTMSG(ret == entry,
437 	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);
438 
439 	/*
440 	 * If the previous entry is not our immediate left child, then it's an
441 	 * ancestor and will be fixed up on the way to the root.  We don't
442 	 * have to check entry->prev against &map->header since &map->header
443 	 * will never be in the tree.
444 	 */
445 	uvm_rb_fixup(map,
446 	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
447 }
448 
449 static void
450 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
451 {
452 	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;
453 
454 	/*
455 	 * If we are removing an interior node, then an adjacent node will
456 	 * be used to replace its position in the tree.  Therefore we will
457 	 * need to fixup the tree starting at the parent of the replacement
458 	 * node.  So record their parents for later use.
459 	 */
460 	if (entry->prev != &map->header)
461 		prev_parent = PARENT_ENTRY(map, entry->prev);
462 	if (entry->next != &map->header)
463 		next_parent = PARENT_ENTRY(map, entry->next);
464 
465 	rb_tree_remove_node(&map->rb_tree, entry);
466 
467 	/*
468 	 * If the previous node has a new parent, fixup the tree starting
469 	 * at the previous node's old parent.
470 	 */
471 	if (entry->prev != &map->header) {
472 		/*
473 		 * Update the previous entry's gap due to our absence.
474 		 */
475 		entry->prev->gap = uvm_rb_gap(entry->prev);
476 		uvm_rb_fixup(map, entry->prev);
477 		if (prev_parent != NULL
478 		    && prev_parent != entry
479 		    && prev_parent != PARENT_ENTRY(map, entry->prev))
480 			uvm_rb_fixup(map, prev_parent);
481 	}
482 
483 	/*
484 	 * If the next node has a new parent, fixup the tree starting
485 	 * at the next node's old parent.
486 	 */
487 	if (entry->next != &map->header) {
488 		uvm_rb_fixup(map, entry->next);
489 		if (next_parent != NULL
490 		    && next_parent != entry
491 		    && next_parent != PARENT_ENTRY(map, entry->next))
492 			uvm_rb_fixup(map, next_parent);
493 	}
494 }
495 
496 #if defined(DEBUG)
497 int uvm_debug_check_map = 0;
498 int uvm_debug_check_rbtree = 0;
499 #define uvm_map_check(map, name) \
500 	_uvm_map_check((map), (name), __FILE__, __LINE__)
501 static void
502 _uvm_map_check(struct vm_map *map, const char *name,
503     const char *file, int line)
504 {
505 
506 	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
507 	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
508 		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
509 		    name, map, file, line);
510 	}
511 }
512 #else /* defined(DEBUG) */
513 #define uvm_map_check(map, name)	/* nothing */
514 #endif /* defined(DEBUG) */
515 
516 #if defined(DEBUG) || defined(DDB)
517 int
518 _uvm_map_sanity(struct vm_map *map)
519 {
520 	bool first_free_found = false;
521 	bool hint_found = false;
522 	const struct vm_map_entry *e;
523 	struct vm_map_entry *hint = map->hint;
524 
525 	e = &map->header;
526 	for (;;) {
527 		if (map->first_free == e) {
528 			first_free_found = true;
529 		} else if (!first_free_found && e->next->start > e->end) {
530 			printf("first_free %p should be %p\n",
531 			    map->first_free, e);
532 			return -1;
533 		}
534 		if (hint == e) {
535 			hint_found = true;
536 		}
537 
538 		e = e->next;
539 		if (e == &map->header) {
540 			break;
541 		}
542 	}
543 	if (!first_free_found) {
544 		printf("stale first_free\n");
545 		return -1;
546 	}
547 	if (!hint_found) {
548 		printf("stale hint\n");
549 		return -1;
550 	}
551 	return 0;
552 }
553 
554 int
555 _uvm_tree_sanity(struct vm_map *map)
556 {
557 	struct vm_map_entry *tmp, *trtmp;
558 	int n = 0, i = 1;
559 
560 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
561 		if (tmp->gap != uvm_rb_gap(tmp)) {
562 			printf("%d/%d gap %#lx != %#lx %s\n",
563 			    n + 1, map->nentries,
564 			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
565 			    tmp->next == &map->header ? "(last)" : "");
566 			goto error;
567 		}
568 		/*
569 		 * If any entries are out of order, the unsigned tmp->gap
570 		 * will wrap around and will likely exceed the size of the map.
571 		 */
572 		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
573 			printf("too large gap %zu\n", (size_t)tmp->gap);
574 			goto error;
575 		}
576 		n++;
577 	}
578 
579 	if (n != map->nentries) {
580 		printf("nentries: %d vs %d\n", n, map->nentries);
581 		goto error;
582 	}
583 
584 	trtmp = NULL;
585 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
586 		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
587 			printf("maxgap %#lx != %#lx\n",
588 			    (ulong)tmp->maxgap,
589 			    (ulong)uvm_rb_maxgap(tmp));
590 			goto error;
591 		}
592 		if (trtmp != NULL && trtmp->start >= tmp->start) {
593 			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
594 			    trtmp->start, tmp->start);
595 			goto error;
596 		}
597 
598 		trtmp = tmp;
599 	}
600 
601 	for (tmp = map->header.next; tmp != &map->header;
602 	    tmp = tmp->next, i++) {
603 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
604 		if (trtmp == NULL)
605 			trtmp = &map->header;
606 		if (tmp->prev != trtmp) {
607 			printf("lookup: %d: %p->prev=%p: %p\n",
608 			    i, tmp, tmp->prev, trtmp);
609 			goto error;
610 		}
611 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
612 		if (trtmp == NULL)
613 			trtmp = &map->header;
614 		if (tmp->next != trtmp) {
615 			printf("lookup: %d: %p->next=%p: %p\n",
616 			    i, tmp, tmp->next, trtmp);
617 			goto error;
618 		}
619 		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
620 		if (trtmp != tmp) {
621 			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
622 			    PARENT_ENTRY(map, tmp));
623 			goto error;
624 		}
625 	}
626 
627 	return (0);
628  error:
629 	return (-1);
630 }
631 #endif /* defined(DEBUG) || defined(DDB) */
632 
633 /*
634  * vm_map_lock: acquire an exclusive (write) lock on a map.
635  *
636  * => The locking protocol provides for guaranteed upgrade from shared ->
637  *    exclusive by whichever thread currently has the map marked busy.
638  *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
639  *    other problems, it defeats any fairness guarantees provided by RW
640  *    locks.
641  */
642 
643 void
644 vm_map_lock(struct vm_map *map)
645 {
646 
647 	for (;;) {
648 		rw_enter(&map->lock, RW_WRITER);
649 		if (map->busy == NULL || map->busy == curlwp) {
650 			break;
651 		}
652 		mutex_enter(&map->misc_lock);
653 		rw_exit(&map->lock);
654 		if (map->busy != NULL) {
655 			cv_wait(&map->cv, &map->misc_lock);
656 		}
657 		mutex_exit(&map->misc_lock);
658 	}
659 	map->timestamp++;
660 }
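
/*
 * Hedged usage sketch (illustrative only; see "LOCKING PROTOCOL
 * NOTES" in uvm_map.h for the authoritative rules): a caller that
 * must sleep without letting the address space change can mark the
 * map busy while dropping the lock:
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *	...sleep, e.g. for I/O...
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 *
 * vm_map_lock() above admits the busy owner (map->busy == curlwp),
 * which is what makes the re-lock step possible.
 */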
661 
662 /*
663  * vm_map_lock_try: try to lock a map, failing if it is already locked.
664  */
665 
666 bool
667 vm_map_lock_try(struct vm_map *map)
668 {
669 
670 	if (!rw_tryenter(&map->lock, RW_WRITER)) {
671 		return false;
672 	}
673 	if (map->busy != NULL) {
674 		rw_exit(&map->lock);
675 		return false;
676 	}
677 	map->timestamp++;
678 	return true;
679 }
680 
681 /*
682  * vm_map_unlock: release an exclusive lock on a map.
683  */
684 
685 void
686 vm_map_unlock(struct vm_map *map)
687 {
688 
689 	KASSERT(rw_write_held(&map->lock));
690 	KASSERT(map->busy == NULL || map->busy == curlwp);
691 	rw_exit(&map->lock);
692 }
693 
694 /*
695  * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
696  *     want an exclusive lock.
697  */
698 
699 void
700 vm_map_unbusy(struct vm_map *map)
701 {
702 
703 	KASSERT(map->busy == curlwp);
704 
705 	/*
706 	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
707 	 *
708 	 * o they can only be set with a write lock held
709 	 * o writers are blocked out with a read or write hold
710 	 * o at any time, only one thread owns the set of values
711 	 */
712 	mutex_enter(&map->misc_lock);
713 	map->busy = NULL;
714 	cv_broadcast(&map->cv);
715 	mutex_exit(&map->misc_lock);
716 }
717 
718 /*
719  * vm_map_lock_read: acquire a shared (read) lock on a map.
720  */
721 
722 void
723 vm_map_lock_read(struct vm_map *map)
724 {
725 
726 	rw_enter(&map->lock, RW_READER);
727 }
728 
729 /*
730  * vm_map_unlock_read: release a shared lock on a map.
731  */
732 
733 void
734 vm_map_unlock_read(struct vm_map *map)
735 {
736 
737 	rw_exit(&map->lock);
738 }
739 
740 /*
741  * vm_map_busy: mark a map as busy.
742  *
743  * => the caller must hold the map write locked
744  */
745 
746 void
747 vm_map_busy(struct vm_map *map)
748 {
749 
750 	KASSERT(rw_write_held(&map->lock));
751 	KASSERT(map->busy == NULL);
752 
753 	map->busy = curlwp;
754 }
755 
756 /*
757  * vm_map_locked_p: return true if the map is write locked.
758  *
759  * => only for debug purposes like KASSERTs.
760  * => should not be used to verify that a map is not locked.
761  */
762 
763 bool
764 vm_map_locked_p(struct vm_map *map)
765 {
766 
767 	return rw_write_held(&map->lock);
768 }
769 
770 /*
771  * uvm_mapent_alloc: allocate a map entry
772  */
773 
774 static struct vm_map_entry *
775 uvm_mapent_alloc(struct vm_map *map, int flags)
776 {
777 	struct vm_map_entry *me;
778 	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
779 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
780 
781 	me = pool_cache_get(&uvm_map_entry_cache, pflags);
782 	if (__predict_false(me == NULL)) {
783 		return NULL;
784 	}
785 	me->flags = 0;
786 
787 	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
788 	    (map == kernel_map), 0, 0);
789 	return me;
790 }
791 
792 /*
793  * uvm_mapent_free: free map entry
794  */
795 
796 static void
797 uvm_mapent_free(struct vm_map_entry *me)
798 {
799 	UVMHIST_FUNC(__func__);
800 	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
801 		(uintptr_t)me, me->flags, 0, 0);
802 	pool_cache_put(&uvm_map_entry_cache, me);
803 }
804 
805 /*
806  * uvm_mapent_copy: copy a map entry, preserving flags
807  */
808 
809 static inline void
810 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
811 {
812 
813 	memcpy(dst, src, sizeof(*dst));
814 	dst->flags = 0;
815 }
816 
817 #if defined(DEBUG)
818 static void
819 _uvm_mapent_check(const struct vm_map_entry *entry, int line)
820 {
821 
822 	if (entry->start >= entry->end) {
823 		goto bad;
824 	}
825 	if (UVM_ET_ISOBJ(entry)) {
826 		if (entry->object.uvm_obj == NULL) {
827 			goto bad;
828 		}
829 	} else if (UVM_ET_ISSUBMAP(entry)) {
830 		if (entry->object.sub_map == NULL) {
831 			goto bad;
832 		}
833 	} else {
834 		if (entry->object.uvm_obj != NULL ||
835 		    entry->object.sub_map != NULL) {
836 			goto bad;
837 		}
838 	}
839 	if (!UVM_ET_ISOBJ(entry)) {
840 		if (entry->offset != 0) {
841 			goto bad;
842 		}
843 	}
844 
845 	return;
846 
847 bad:
848 	panic("%s: bad entry %p, line %d", __func__, entry, line);
849 }
850 #endif /* defined(DEBUG) */
851 
852 /*
853  * uvm_map_entry_unwire: unwire a map entry
854  *
855  * => map should be locked by caller
856  */
857 
858 static inline void
859 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
860 {
861 
862 	entry->wired_count = 0;
863 	uvm_fault_unwire_locked(map, entry->start, entry->end);
864 }
865 
866 
867 /*
868  * wrapper for calling amap_ref()
869  */
870 static inline void
871 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
872 {
873 
874 	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
875 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
876 }
877 
878 
879 /*
880  * wrapper for calling amap_unref()
881  */
882 static inline void
883 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
884 {
885 
886 	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
887 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
888 }
889 
890 
891 /*
892  * uvm_map_init: init mapping system at boot time.
893  */
894 
895 void
896 uvm_map_init(void)
897 {
898 	/*
899 	 * first, init logging system.
900 	 */
901 
902 	UVMHIST_FUNC(__func__);
903 	UVMHIST_LINK_STATIC(maphist);
904 	UVMHIST_LINK_STATIC(pdhist);
905 	UVMHIST_CALLED(maphist);
906 	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
907 
908 	/*
909 	 * initialize the global lock for kernel map entry.
910 	 */
911 
912 	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
913 }
914 
915 /*
916  * uvm_map_init_caches: init mapping system caches.
917  */
918 void
919 uvm_map_init_caches(void)
920 {
921 	/*
922 	 * initialize caches.
923 	 */
924 
925 	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
926 	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
927 	    NULL, NULL);
928 }
929 
930 /*
931  * clippers
932  */
933 
934 /*
935  * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
936  */
937 
938 static void
939 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
940     vaddr_t splitat)
941 {
942 	vaddr_t adj;
943 
944 	KASSERT(entry1->start < splitat);
945 	KASSERT(splitat < entry1->end);
946 
947 	adj = splitat - entry1->start;
948 	entry1->end = entry2->start = splitat;
949 
950 	if (entry1->aref.ar_amap) {
951 		amap_splitref(&entry1->aref, &entry2->aref, adj);
952 	}
953 	if (UVM_ET_ISSUBMAP(entry1)) {
954 		/* ... unlikely to happen, but play it safe */
955 		 uvm_map_reference(entry1->object.sub_map);
956 	} else if (UVM_ET_ISOBJ(entry1)) {
957 		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
958 		entry2->offset += adj;
959 		if (entry1->object.uvm_obj->pgops &&
960 		    entry1->object.uvm_obj->pgops->pgo_reference)
961 			entry1->object.uvm_obj->pgops->pgo_reference(
962 			    entry1->object.uvm_obj);
963 	}
964 }
965 
966 /*
967  * uvm_map_clip_start: ensure that the entry begins at or after
968  *	the starting address; if it doesn't, we split the entry.
969  *
970  * => caller should use UVM_MAP_CLIP_START macro rather than calling
971  *    this directly
972  * => map must be locked by caller
973  */
974 
975 void
976 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
977     vaddr_t start)
978 {
979 	struct vm_map_entry *new_entry;
980 
981 	/* uvm_map_simplify_entry(map, entry); */ /* XXX */
982 
983 	uvm_map_check(map, "clip_start entry");
984 	uvm_mapent_check(entry);
985 
986 	/*
987 	 * Split off the front portion.  note that we must insert the new
988 	 * entry BEFORE this one, so that this entry has the specified
989 	 * starting address.
990 	 */
991 	new_entry = uvm_mapent_alloc(map, 0);
992 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
993 	uvm_mapent_splitadj(new_entry, entry, start);
994 	uvm_map_entry_link(map, entry->prev, new_entry);
995 
996 	uvm_map_check(map, "clip_start leave");
997 }
998 
999 /*
1000  * uvm_map_clip_end: ensure that the entry ends at or before
1001  *	the ending address; if it doesn't, we split the entry
1002  *
1003  * => caller should use UVM_MAP_CLIP_END macro rather than calling
1004  *    this directly
1005  * => map must be locked by caller
1006  */
1007 
1008 void
1009 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1010 {
1011 	struct vm_map_entry *new_entry;
1012 
1013 	uvm_map_check(map, "clip_end entry");
1014 	uvm_mapent_check(entry);
1015 
1016 	/*
1017 	 *	Create a new entry and insert it
1018 	 *	AFTER the specified entry
1019 	 */
1020 	new_entry = uvm_mapent_alloc(map, 0);
1021 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1022 	uvm_mapent_splitadj(entry, new_entry, end);
1023 	uvm_map_entry_link(map, entry, new_entry);
1024 
1025 	uvm_map_check(map, "clip_end leave");
1026 }
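
/*
 * Worked example (addresses invented): clipping an entry covering
 * [0x1000, 0x4000) with uvm_map_clip_start(map, entry, 0x2000)
 * splits it into [0x1000, 0x2000) and [0x2000, 0x4000), leaving
 * "entry" as the latter piece so that it begins at the requested
 * address; uvm_map_clip_end() is the mirror image, leaving "entry"
 * as the front piece.
 */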
1027 
1028 /*
1029  *   M A P   -   m a i n   e n t r y   p o i n t
1030  */
1031 /*
1032  * uvm_map: establish a valid mapping in a map
1033  *
1034  * => assume startp is page aligned.
1035  * => assume size is a multiple of PAGE_SIZE.
1036  * => assume sys_mmap provides enough of a "hint" to have us skip
1037  *	over text/data/bss area.
1038  * => map must be unlocked (we will lock it)
1039  * => <uobj,uoffset> value meanings (4 cases):
1040  *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
1041  *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
1042  *	 [3] <uobj,uoffset>		== normal mapping
1043  *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
1044  *
1045  *    case [4] is for kernel mappings where we don't know the offset until
1046  *    we've found a virtual address.   note that kernel object offsets are
1047  *    always relative to vm_map_min(kernel_map).
1048  *
1049  * => if `align' is non-zero, we align the virtual address to the specified
1050  *	alignment.
1051  *	this is provided as a mechanism for large pages.
1052  *
1053  * => XXXCDC: need way to map in external amap?
1054  */
1055 
1056 int
1057 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1058     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1059 {
1060 	struct uvm_map_args args;
1061 	struct vm_map_entry *new_entry;
1062 	int error;
1063 
1064 	KASSERT((size & PAGE_MASK) == 0);
1065 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1066 
1067 	/*
1068 	 * for pager_map, allocate the new entry first to avoid sleeping
1069 	 * for memory while we have the map locked.
1070 	 */
1071 
1072 	new_entry = NULL;
1073 	if (map == pager_map) {
1074 		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1075 		if (__predict_false(new_entry == NULL))
1076 			return ENOMEM;
1077 	}
1078 	if (map == pager_map)
1079 		flags |= UVM_FLAG_NOMERGE;
1080 
1081 	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1082 	    flags, &args);
1083 	if (!error) {
1084 		error = uvm_map_enter(map, &args, new_entry);
1085 		*startp = args.uma_start;
1086 	} else if (new_entry) {
1087 		uvm_mapent_free(new_entry);
1088 	}
1089 
1090 #if defined(DEBUG)
1091 	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
1092 		uvm_km_check_empty(map, *startp, *startp + size);
1093 	}
1094 #endif /* defined(DEBUG) */
1095 
1096 	return error;
1097 }
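
/*
 * Hedged call sketch (illustrative, not taken from a real caller):
 * an anonymous, copy-on-write mapping -- case [2] above, no uobj
 * and no PMAP_PREFER hint -- might be requested as:
 *
 *	vaddr_t va = vm_map_min(map);
 *	int error = uvm_map(map, &va, PAGE_SIZE, NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INHERIT_COPY,
 *		UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
 *
 * where va holds the chosen address on success.
 */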
1098 
1099 /*
1100  * uvm_map_prepare:
1101  *
1102  * called with map unlocked.
1103  * on success, returns the map locked.
1104  */
1105 
1106 int
1107 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1108     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1109     struct uvm_map_args *args)
1110 {
1111 	struct vm_map_entry *prev_entry;
1112 	vm_prot_t prot = UVM_PROTECTION(flags);
1113 	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1114 
1115 	UVMHIST_FUNC(__func__);
1116 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%jx, flags=%#jx)",
1117 	    (uintptr_t)map, start, size, flags);
1118 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
1119 	    uoffset,0,0);
1120 
1121 	/*
1122 	 * detect a popular device driver bug.
1123 	 */
1124 
1125 	KASSERT(doing_shutdown || curlwp != NULL);
1126 
1127 	/*
1128 	 * zero-sized mapping doesn't make any sense.
1129 	 */
1130 	KASSERT(size > 0);
1131 
1132 	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1133 
1134 	uvm_map_check(map, "map entry");
1135 
1136 	/*
1137 	 * check sanity of protection code
1138 	 */
1139 
1140 	if ((prot & maxprot) != prot) {
1141 		UVMHIST_LOG(maphist, "<- prot. failure:  prot=%#jx, max=%#jx",
1142 		prot, maxprot,0,0);
1143 		return EACCES;
1144 	}
1145 
1146 	/*
1147 	 * figure out where to put new VM range
1148 	 */
1149 retry:
1150 	if (vm_map_lock_try(map) == false) {
1151 		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
1152 			return EAGAIN;
1153 		}
1154 		vm_map_lock(map); /* could sleep here */
1155 	}
1156 	if (flags & UVM_FLAG_UNMAP) {
1157 		KASSERT(flags & UVM_FLAG_FIXED);
1158 		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);
1159 
1160 		/*
1161 		 * Set prev_entry to what it will need to be after any existing
1162 		 * entries are removed later in uvm_map_enter().
1163 		 */
1164 
1165 		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
1166 			if (start == prev_entry->start)
1167 				prev_entry = prev_entry->prev;
1168 			else
1169 				UVM_MAP_CLIP_END(map, prev_entry, start);
1170 			SAVE_HINT(map, map->hint, prev_entry);
1171 		}
1172 	} else {
1173 		prev_entry = uvm_map_findspace(map, start, size, &start,
1174 		    uobj, uoffset, align, flags);
1175 	}
1176 	if (prev_entry == NULL) {
1177 		unsigned int timestamp;
1178 
1179 		timestamp = map->timestamp;
1180 		UVMHIST_LOG(maphist,"waiting va timestamp=%#jx",
1181 			    timestamp,0,0,0);
1182 		map->flags |= VM_MAP_WANTVA;
1183 		vm_map_unlock(map);
1184 
1185 		/*
1186 		 * try to reclaim kva and wait until someone unmaps.
1187 		 * fragile locking here, so we awaken every second to
1188 		 * recheck the condition.
1189 		 */
1190 
1191 		mutex_enter(&map->misc_lock);
1192 		while ((map->flags & VM_MAP_WANTVA) != 0 &&
1193 		   map->timestamp == timestamp) {
1194 			if ((flags & UVM_FLAG_WAITVA) == 0) {
1195 				mutex_exit(&map->misc_lock);
1196 				UVMHIST_LOG(maphist,
1197 				    "<- uvm_map_findspace failed!", 0,0,0,0);
1198 				return ENOMEM;
1199 			} else {
1200 				cv_timedwait(&map->cv, &map->misc_lock, hz);
1201 			}
1202 		}
1203 		mutex_exit(&map->misc_lock);
1204 		goto retry;
1205 	}
1206 
1207 #ifdef PMAP_GROWKERNEL
1208 	/*
1209 	 * If the kernel pmap can't map the requested space,
1210 	 * then allocate more resources for it.
1211 	 */
1212 	if (map == kernel_map && uvm_maxkaddr < (start + size))
1213 		uvm_maxkaddr = pmap_growkernel(start + size);
1214 #endif
1215 
1216 	UVMMAP_EVCNT_INCR(map_call);
1217 
1218 	/*
1219 	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1220 	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
1221 	 * either case we want to zero it  before storing it in the map entry
1222 	 * (because it looks strange and confusing when debugging...)
1223 	 *
1224 	 * if uobj is not null
1225 	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1226 	 *      and we do not need to change uoffset.
1227 	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1228 	 *      now (based on the starting address of the map).   this case is
1229 	 *      for kernel object mappings where we don't know the offset until
1230 	 *      the virtual address is found (with uvm_map_findspace).   the
1231 	 *      offset is the distance we are from the start of the map.
1232 	 */
1233 
1234 	if (uobj == NULL) {
1235 		uoffset = 0;
1236 	} else {
1237 		if (uoffset == UVM_UNKNOWN_OFFSET) {
1238 			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1239 			uoffset = start - vm_map_min(kernel_map);
1240 		}
1241 	}
1242 
1243 	args->uma_flags = flags;
1244 	args->uma_prev = prev_entry;
1245 	args->uma_start = start;
1246 	args->uma_size = size;
1247 	args->uma_uobj = uobj;
1248 	args->uma_uoffset = uoffset;
1249 
1250 	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
1251 	return 0;
1252 }
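
/*
 * Sketch of the two-phase interface (restating uvm_map() above):
 *
 *	error = uvm_map_prepare(map, *startp, size, uobj, uoffset,
 *	    align, flags, &args);
 *	if (error == 0) {
 *		error = uvm_map_enter(map, &args, new_entry);
 *		*startp = args.uma_start;
 *	}
 *
 * uvm_map_prepare() returns with the map locked on success and
 * uvm_map_enter() unlocks it before returning, so the two must be
 * used as a pair.
 */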
1253 
1254 /*
1255  * uvm_map_enter:
1256  *
1257  * called with map locked.
1258  * unlock the map before returning.
1259  */
1260 
1261 int
1262 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1263     struct vm_map_entry *new_entry)
1264 {
1265 	struct vm_map_entry *prev_entry = args->uma_prev;
1266 	struct vm_map_entry *dead = NULL, *dead_entries = NULL;
1267 
1268 	const uvm_flag_t flags = args->uma_flags;
1269 	const vm_prot_t prot = UVM_PROTECTION(flags);
1270 	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1271 	const vm_inherit_t inherit = UVM_INHERIT(flags);
1272 	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1273 	    AMAP_EXTEND_NOWAIT : 0;
1274 	const int advice = UVM_ADVICE(flags);
1275 
1276 	vaddr_t start = args->uma_start;
1277 	vsize_t size = args->uma_size;
1278 	struct uvm_object *uobj = args->uma_uobj;
1279 	voff_t uoffset = args->uma_uoffset;
1280 
1281 	const int kmap = (vm_map_pmap(map) == pmap_kernel());
1282 	int merged = 0;
1283 	int error;
1284 	int newetype;
1285 
1286 	UVMHIST_FUNC(__func__);
1287 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
1288 	    (uintptr_t)map, start, size, flags);
1289 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
1290 	    uoffset,0,0);
1291 
1292 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1293 	KASSERT(vm_map_locked_p(map));
1294 	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP)) !=
1295 		(UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP));
1296 
1297 	if (uobj)
1298 		newetype = UVM_ET_OBJ;
1299 	else
1300 		newetype = 0;
1301 
1302 	if (flags & UVM_FLAG_COPYONW) {
1303 		newetype |= UVM_ET_COPYONWRITE;
1304 		if ((flags & UVM_FLAG_OVERLAY) == 0)
1305 			newetype |= UVM_ET_NEEDSCOPY;
1306 	}
1307 
1308 	/*
1309 	 * For mappings with unmap, remove any old entries now.  Adding the new
1310 	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
1311 	 * is set, and we do not support nowait and unmap together.
1312 	 */
1313 
1314 	if (flags & UVM_FLAG_UNMAP) {
1315 		KASSERT(flags & UVM_FLAG_FIXED);
1316 		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
1317 #ifdef DEBUG
1318 		struct vm_map_entry *tmp_entry __diagused;
1319 		bool rv __diagused;
1320 
1321 		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
1322 		KASSERT(!rv);
1323 		KASSERTMSG(prev_entry == tmp_entry,
1324 			   "args %p prev_entry %p tmp_entry %p",
1325 			   args, prev_entry, tmp_entry);
1326 #endif
1327 		SAVE_HINT(map, map->hint, prev_entry);
1328 	}
1329 
1330 	/*
1331 	 * try and insert in map by extending previous entry, if possible.
1332 	 * XXX: we don't try and pull back the next entry.   might be useful
1333 	 * for a stack, but we are currently allocating our stack in advance.
1334 	 */
1335 
1336 	if (flags & UVM_FLAG_NOMERGE)
1337 		goto nomerge;
1338 
1339 	if (prev_entry->end == start &&
1340 	    prev_entry != &map->header &&
1341 	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
1342 	    prot, maxprot, inherit, advice, 0)) {
1343 
1344 		if (uobj && prev_entry->offset +
1345 		    (prev_entry->end - prev_entry->start) != uoffset)
1346 			goto forwardmerge;
1347 
1348 		/*
1349 		 * can't extend a shared amap.  note: no need to lock amap to
1350 		 * look at refs since we don't care about its exact value.
1351 		 * if it is one (i.e. we have the only reference) it will stay there
1352 		 */
1353 
1354 		if (prev_entry->aref.ar_amap &&
1355 		    amap_refs(prev_entry->aref.ar_amap) != 1) {
1356 			goto forwardmerge;
1357 		}
1358 
1359 		if (prev_entry->aref.ar_amap) {
1360 			error = amap_extend(prev_entry, size,
1361 			    amapwaitflag | AMAP_EXTEND_FORWARDS);
1362 			if (error)
1363 				goto nomerge;
1364 		}
1365 
1366 		if (kmap) {
1367 			UVMMAP_EVCNT_INCR(kbackmerge);
1368 		} else {
1369 			UVMMAP_EVCNT_INCR(ubackmerge);
1370 		}
1371 		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);
1372 
1373 		/*
1374 		 * drop our reference to uobj since we are extending a reference
1375 		 * that we already have (the ref count can not drop to zero).
1376 		 */
1377 
1378 		if (uobj && uobj->pgops->pgo_detach)
1379 			uobj->pgops->pgo_detach(uobj);
1380 
1381 		/*
1382 		 * Now that we've merged the entries, note that we've grown
1383 		 * and our gap has shrunk.  Then fix the tree.
1384 		 */
1385 		prev_entry->end += size;
1386 		prev_entry->gap -= size;
1387 		uvm_rb_fixup(map, prev_entry);
1388 
1389 		uvm_map_check(map, "map backmerged");
1390 
1391 		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1392 		merged++;
1393 	}
1394 
1395 forwardmerge:
1396 	if (prev_entry->next->start == (start + size) &&
1397 	    prev_entry->next != &map->header &&
1398 	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
1399 	    prot, maxprot, inherit, advice, 0)) {
1400 
1401 		if (uobj && prev_entry->next->offset != uoffset + size)
1402 			goto nomerge;
1403 
1404 		/*
1405 		 * can't extend a shared amap.  note: no need to lock amap to
1406 		 * look at refs since we don't care about its exact value.
1407 		 * if it is one (i.e. we have the only reference) it will stay there.
1408 		 *
1409 		 * note that we also can't merge two amaps, so if we
1410 		 * merged with the previous entry which has an amap,
1411 		 * and the next entry also has an amap, we give up.
1412 		 *
1413 		 * Interesting cases:
1414 		 * amap, new, amap -> give up second merge (single fwd extend)
1415 		 * amap, new, none -> double forward extend (extend again here)
1416 		 * none, new, amap -> double backward extend (done here)
1417 		 * uobj, new, amap -> single backward extend (done here)
1418 		 *
1419 		 * XXX should we attempt to deal with someone refilling
1420 		 * the deallocated region between two entries that are
1421 		 * backed by the same amap (ie, arefs is 2, "prev" and
1422 		 * "next" refer to it, and adding this allocation will
1423 		 * close the hole, thus restoring arefs to 1 and
1424 		 * deallocating the "next" vm_map_entry)?  -- @@@
1425 		 */
1426 
1427 		if (prev_entry->next->aref.ar_amap &&
1428 		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1429 		     (merged && prev_entry->aref.ar_amap))) {
1430 			goto nomerge;
1431 		}
1432 
1433 		if (merged) {
1434 			/*
1435 			 * Try to extend the amap of the previous entry to
1436 			 * cover the next entry as well.  If it doesn't work
1437 			 * just skip on, don't actually give up, since we've
1438 			 * already completed the back merge.
1439 			 */
1440 			if (prev_entry->aref.ar_amap) {
1441 				if (amap_extend(prev_entry,
1442 				    prev_entry->next->end -
1443 				    prev_entry->next->start,
1444 				    amapwaitflag | AMAP_EXTEND_FORWARDS))
1445 					goto nomerge;
1446 			}
1447 
1448 			/*
1449 			 * Try to extend the amap of the *next* entry
1450 			 * back to cover the new allocation *and* the
1451 			 * previous entry as well (the previous merge
1452 			 * didn't have an amap already otherwise we
1453 			 * wouldn't be checking here for an amap).  If
1454 			 * it doesn't work just skip on, again, don't
1455 			 * actually give up, since we've already
1456 			 * completed the back merge.
1457 			 */
1458 			else if (prev_entry->next->aref.ar_amap) {
1459 				if (amap_extend(prev_entry->next,
1460 				    prev_entry->end -
1461 				    prev_entry->start,
1462 				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
1463 					goto nomerge;
1464 			}
1465 		} else {
1466 			/*
1467 			 * Pull the next entry's amap backwards to cover this
1468 			 * new allocation.
1469 			 */
1470 			if (prev_entry->next->aref.ar_amap) {
1471 				error = amap_extend(prev_entry->next, size,
1472 				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
1473 				if (error)
1474 					goto nomerge;
1475 			}
1476 		}
1477 
1478 		if (merged) {
1479 			if (kmap) {
1480 				UVMMAP_EVCNT_DECR(kbackmerge);
1481 				UVMMAP_EVCNT_INCR(kbimerge);
1482 			} else {
1483 				UVMMAP_EVCNT_DECR(ubackmerge);
1484 				UVMMAP_EVCNT_INCR(ubimerge);
1485 			}
1486 		} else {
1487 			if (kmap) {
1488 				UVMMAP_EVCNT_INCR(kforwmerge);
1489 			} else {
1490 				UVMMAP_EVCNT_INCR(uforwmerge);
1491 			}
1492 		}
1493 		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);
1494 
1495 		/*
1496 		 * drop our reference to uobj since we are extending a reference
1497 		 * that we already have (the ref count can not drop to zero).
1498 		 */
1499 		if (uobj && uobj->pgops->pgo_detach)
1500 			uobj->pgops->pgo_detach(uobj);
1501 
1502 		if (merged) {
1503 			dead = prev_entry->next;
1504 			prev_entry->end = dead->end;
1505 			uvm_map_entry_unlink(map, dead);
1506 			if (dead->aref.ar_amap != NULL) {
1507 				prev_entry->aref = dead->aref;
1508 				dead->aref.ar_amap = NULL;
1509 			}
1510 		} else {
1511 			prev_entry->next->start -= size;
1512 			if (prev_entry != &map->header) {
1513 				prev_entry->gap -= size;
1514 				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
1515 				uvm_rb_fixup(map, prev_entry);
1516 			}
1517 			if (uobj)
1518 				prev_entry->next->offset = uoffset;
1519 		}
1520 
1521 		uvm_map_check(map, "map forwardmerged");
1522 
1523 		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1524 		merged++;
1525 	}
1526 
1527 nomerge:
1528 	if (!merged) {
1529 		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
1530 		if (kmap) {
1531 			UVMMAP_EVCNT_INCR(knomerge);
1532 		} else {
1533 			UVMMAP_EVCNT_INCR(unomerge);
1534 		}
1535 
1536 		/*
1537 		 * allocate new entry and link it in.
1538 		 */
1539 
1540 		if (new_entry == NULL) {
1541 			new_entry = uvm_mapent_alloc(map,
1542 				(flags & UVM_FLAG_NOWAIT));
1543 			if (__predict_false(new_entry == NULL)) {
1544 				error = ENOMEM;
1545 				goto done;
1546 			}
1547 		}
1548 		new_entry->start = start;
1549 		new_entry->end = new_entry->start + size;
1550 		new_entry->object.uvm_obj = uobj;
1551 		new_entry->offset = uoffset;
1552 
1553 		new_entry->etype = newetype;
1554 
1555 		if (flags & UVM_FLAG_NOMERGE) {
1556 			new_entry->flags |= UVM_MAP_NOMERGE;
1557 		}
1558 
1559 		new_entry->protection = prot;
1560 		new_entry->max_protection = maxprot;
1561 		new_entry->inheritance = inherit;
1562 		new_entry->wired_count = 0;
1563 		new_entry->advice = advice;
1564 		if (flags & UVM_FLAG_OVERLAY) {
1565 
1566 			/*
1567 			 * to_add: for BSS we overallocate a little since we
1568 			 * are likely to extend
1569 			 */
1570 
1571 			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1572 				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1573 			struct vm_amap *amap = amap_alloc(size, to_add,
1574 			    (flags & UVM_FLAG_NOWAIT));
1575 			if (__predict_false(amap == NULL)) {
1576 				error = ENOMEM;
1577 				goto done;
1578 			}
1579 			new_entry->aref.ar_pageoff = 0;
1580 			new_entry->aref.ar_amap = amap;
1581 		} else {
1582 			new_entry->aref.ar_pageoff = 0;
1583 			new_entry->aref.ar_amap = NULL;
1584 		}
1585 		uvm_map_entry_link(map, prev_entry, new_entry);
1586 
1587 		/*
1588 		 * Update the free space hint
1589 		 */
1590 
1591 		if ((map->first_free == prev_entry) &&
1592 		    (prev_entry->end >= new_entry->start))
1593 			map->first_free = new_entry;
1594 
1595 		new_entry = NULL;
1596 	}
1597 
1598 	map->size += size;
1599 
1600 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1601 
1602 	error = 0;
1603 
1604 done:
1605 	vm_map_unlock(map);
1606 
1607 	if (new_entry) {
1608 		uvm_mapent_free(new_entry);
1609 	}
1610 	if (dead) {
1611 		KDASSERT(merged);
1612 		uvm_mapent_free(dead);
1613 	}
1614 	if (dead_entries)
1615 		uvm_unmap_detach(dead_entries, 0);
1616 
1617 	return error;
1618 }
1619 
1620 /*
1621  * uvm_map_lookup_entry_bytree: lookup an entry in tree
1622  */
1623 
1624 static inline bool
1625 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1626     struct vm_map_entry **entry	/* OUT */)
1627 {
1628 	struct vm_map_entry *prev = &map->header;
1629 	struct vm_map_entry *cur = ROOT_ENTRY(map);
1630 
1631 	while (cur) {
1632 		UVMMAP_EVCNT_INCR(mlk_treeloop);
1633 		if (address >= cur->start) {
1634 			if (address < cur->end) {
1635 				*entry = cur;
1636 				return true;
1637 			}
1638 			prev = cur;
1639 			cur = RIGHT_ENTRY(cur);
1640 		} else
1641 			cur = LEFT_ENTRY(cur);
1642 	}
1643 	*entry = prev;
1644 	return false;
1645 }
1646 
1647 /*
1648  * uvm_map_lookup_entry: find map entry at or before an address
1649  *
1650  * => map must at least be read-locked by caller
1651  * => entry is returned in "entry"
1652  * => return value is true if address is in the returned entry
1653  */
1654 
1655 bool
1656 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1657     struct vm_map_entry **entry	/* OUT */)
1658 {
1659 	struct vm_map_entry *cur;
1660 	UVMHIST_FUNC(__func__);
1661 	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
1662 	    (uintptr_t)map, address, (uintptr_t)entry, 0);
1663 
1664 	/*
1665 	 * make a quick check to see if we are already looking at
1666 	 * the entry we want (which is usually the case).  note also
1667 	 * that we don't need to save the hint here...  it is the
1668 	 * same hint (unless we are at the header, in which case the
1669 	 * hint didn't buy us anything anyway).
1670 	 */
1671 
1672 	cur = map->hint;
1673 	UVMMAP_EVCNT_INCR(mlk_call);
1674 	if (cur != &map->header &&
1675 	    address >= cur->start && cur->end > address) {
1676 		UVMMAP_EVCNT_INCR(mlk_hint);
1677 		*entry = cur;
1678 		UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
1679 		    (uintptr_t)cur, 0, 0, 0);
1680 		uvm_mapent_check(*entry);
1681 		return (true);
1682 	}
1683 	uvm_map_check(map, __func__);
1684 
1685 	/*
1686 	 * lookup in the tree.
1687 	 */
1688 
1689 	UVMMAP_EVCNT_INCR(mlk_tree);
1690 	if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
1691 		SAVE_HINT(map, map->hint, *entry);
1692 		UVMHIST_LOG(maphist,"<- search got it (%#jx)",
1693 		    (uintptr_t)*entry, 0, 0, 0);
1694 		KDASSERT((*entry)->start <= address);
1695 		KDASSERT(address < (*entry)->end);
1696 		uvm_mapent_check(*entry);
1697 		return (true);
1698 	}
1699 
1700 	SAVE_HINT(map, map->hint, *entry);
1701 	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1702 	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1703 	KDASSERT((*entry)->next == &map->header ||
1704 	    address < (*entry)->next->start);
1705 	return (false);
1706 }
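
/*
 * Usage sketch (illustrative): with the map at least read-locked,
 *
 *	struct vm_map_entry *entry;
 *
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		...va lies within *entry...
 *	} else {
 *		...entry is the entry just before va, or the header...
 *	}
 */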
1707 
1708 /*
1709  * See if the range between start and start + length fits in the gap
1710  * between entry->end and entry->next->start.  Returns 1 if it fits,
1711  * 0 if it doesn't fit, and -1 if the address wraps around.
1712  */
1713 static int
1714 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1715     vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
1716 {
1717 	vaddr_t end;
1718 
1719 #ifdef PMAP_PREFER
1720 	/*
1721 	 * push start address forward as needed to avoid VAC alias problems.
1722 	 * we only do this if a valid offset is specified.
1723 	 */
1724 
1725 	if (uoffset != UVM_UNKNOWN_OFFSET)
1726 		PMAP_PREFER(uoffset, start, length, topdown);
1727 #endif
1728 	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
1729 		KASSERT(align < uvmexp.ncolors);
1730 		if (uvmexp.ncolors > 1) {
1731 			const u_int colormask = uvmexp.colormask;
1732 			const u_int colorsize = colormask + 1;
1733 			vaddr_t hint = atop(*start);
1734 			const u_int color = hint & colormask;
1735 			if (color != align) {
1736 				hint -= color;	/* adjust to color boundary */
1737 				KASSERT((hint & colormask) == 0);
1738 				if (topdown) {
1739 					if (align > color)
1740 						hint -= colorsize;
1741 				} else {
1742 					if (align < color)
1743 						hint += colorsize;
1744 				}
1745 				*start = ptoa(hint + align); /* adjust to color */
1746 			}
1747 		}
1748 	} else {
1749 		KASSERT(powerof2(align));
1750 		uvm_map_align_va(start, align, topdown);
1751 		/*
1752 		 * XXX Should we PMAP_PREFER() here again?
1753 		 * eh...i think we're okay
1754 		 */
1755 	}
1756 
1757 	/*
1758 	 * Find the end of the proposed new region.  Be sure we didn't
1759 	 * wrap around the address; if so, we lose.  Otherwise, if the
1760 	 * proposed new region fits before the next entry, we win.
1761 	 */
1762 
1763 	end = *start + length;
1764 	if (end < *start)
1765 		return (-1);
1766 
1767 	if (entry->next->start >= end && *start >= entry->end)
1768 		return (1);
1769 
1770 	return (0);
1771 }
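
/*
 * Worked example (numbers invented): with *start == 0x3000 and
 * length == 0x2000 the proposed region is [0x3000, 0x5000).  If
 * entry->end == 0x3000 and entry->next->start == 0x6000, the
 * region fits the gap and 1 is returned; were entry->next->start
 * 0x4000 instead, 0 would be returned.
 */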
1772 
1773 static void
1774 uvm_findspace_invariants(struct vm_map *map, vaddr_t orig_hint, vaddr_t length,
1775     struct uvm_object *uobj, voff_t uoffset, vsize_t align, int flags,
1776     vaddr_t hint, struct vm_map_entry *entry, int line)
1777 {
1778 	const int topdown = map->flags & VM_MAP_TOPDOWN;
1779 	const int hint_location_ok =
1780 		topdown ? hint <= orig_hint
1781 			: hint >= orig_hint;
1782 
1783 #if !(defined(__sh3__) && defined(DIAGNOSTIC)) /* XXXRO: kern/51254 */
1784 #define UVM_FINDSPACE_KASSERTMSG KASSERTMSG
1785 
1786 #else  /* sh3 && DIAGNOSTIC */
1787 /* like KASSERTMSG but make it not fatal */
1788 #define UVM_FINDSPACE_KASSERTMSG(e, msg, ...)			\
1789 		(__predict_true((e)) ? (void)0 :		\
1790 		    printf(__KASSERTSTR msg "\n",		\
1791 			"weak diagnostic ", #e,			\
1792 			__FILE__, __LINE__, ## __VA_ARGS__))
1793 #endif
1794 
1795 	UVM_FINDSPACE_KASSERTMSG(hint_location_ok,
1796 	    "%s map=%p hint=%#" PRIxVADDR " %s orig_hint=%#" PRIxVADDR
1797 	    " length=%#" PRIxVSIZE " uobj=%p uoffset=%#llx align=%" PRIxVSIZE
1798 	    " flags=%#x entry=%p (uvm_map_findspace line %d)",
1799 	    topdown ? "topdown" : "bottomup",
1800 	    map, hint, topdown ? ">" : "<", orig_hint,
1801 	    length, uobj, (unsigned long long)uoffset, align,
1802 	    flags, entry, line);
1803 }
1804 
1805 /*
1806  * uvm_map_findspace: find "length" sized space in "map".
1807  *
1808  * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1809  *	set in "flags" (in which case we insist on using "hint").
1810  * => "result" is VA returned
1811  * => uobj/uoffset are to be used to handle VAC alignment, if required
1812  * => if "align" is non-zero, we attempt to align to that value.
1813  * => caller must at least have read-locked map
1814  * => returns NULL on failure, or pointer to prev. map entry if success
1815  * => note this is a cross between the old vm_map_findspace and vm_map_find
1816  */
1817 
1818 struct vm_map_entry *
1819 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1820     vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1821     vsize_t align, int flags)
1822 {
1823 #define	INVARIANTS()							      \
1824 	uvm_findspace_invariants(map, orig_hint, length, uobj, uoffset, align,\
1825 	    flags, hint, entry, __LINE__)
1826 	struct vm_map_entry *entry = NULL;
1827 	struct vm_map_entry *child, *prev, *tmp;
1828 	vaddr_t orig_hint __diagused;
1829 	const int topdown = map->flags & VM_MAP_TOPDOWN;
1830 	int avail;
1831 	UVMHIST_FUNC(__func__);
1832 	UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx...",
1833 	    (uintptr_t)map, hint, length, flags);
1834 	UVMHIST_LOG(maphist, " uobj=%#jx, uoffset=%#jx, align=%#jx)",
1835 	    (uintptr_t)uobj, uoffset, align, 0);
1836 
1837 	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align));
1838 	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
1839 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1840 
1841 	uvm_map_check(map, "map_findspace entry");
1842 
1843 	/*
1844 	 * Clamp the hint to the VM map's min/max address, and remember
1845 	 * the clamped value as the original hint (orig_hint).  If we
1846 	 * are aligning, then we
1847 	 * may have to try again with no alignment constraint if we
1848 	 * fail the first time.
1849 	 *
1850 	 * We use the original hint to verify later that the search has
1851 	 * been monotonic -- that is, nonincreasing or nondecreasing,
1852 	 * according to topdown or !topdown respectively.  But the
1853 	 * clamping is not monotonic.
1854 	 */
1855 	if (hint < vm_map_min(map)) {	/* check ranges ... */
1856 		if (flags & UVM_FLAG_FIXED) {
1857 			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1858 			return (NULL);
1859 		}
1860 		hint = vm_map_min(map);
1861 	}
1862 	if (hint > vm_map_max(map)) {
1863 		UVMHIST_LOG(maphist,"<- VA %#jx > range [%#jx->%#jx]",
1864 		    hint, vm_map_min(map), vm_map_max(map), 0);
1865 		return (NULL);
1866 	}
1867 	orig_hint = hint;
1868 	INVARIANTS();
1869 
1870 	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
1871 	    hint, vm_map_min(map), vm_map_max(map), 0);
1872 
1873 	/*
1874 	 * hint may not be aligned properly; we need to round it up or
1875 	 * down before proceeding further.
1876 	 */
1877 	if ((flags & UVM_FLAG_COLORMATCH) == 0) {
1878 		uvm_map_align_va(&hint, align, topdown);
1879 		INVARIANTS();
1880 	}
1881 
1882 	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
1883 	    hint, vm_map_min(map), vm_map_max(map), 0);
1884 	/*
1885 	 * Look for the first possible address; if there's already
1886 	 * something at this address, we have to start after it.
1887 	 */
1888 
1889 	/*
1890 	 * @@@: there are four, no, eight cases to consider.
1891 	 *
1892 	 * 0: found,     fixed,     bottom up -> fail
1893 	 * 1: found,     fixed,     top down  -> fail
1894 	 * 2: found,     not fixed, bottom up -> start after entry->end,
1895 	 *                                       loop up
1896 	 * 3: found,     not fixed, top down  -> start before entry->start,
1897 	 *                                       loop down
1898 	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
1899 	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
1900 	 * 6: not found, not fixed, bottom up -> check entry->next->start,
1901 	 *                                       loop up
1902 	 * 7: not found, not fixed, top down  -> check entry->next->start,
1903 	 *                                       loop down
1904 	 *
1905 	 * as you can see, it reduces to roughly five cases, and
1906 	 * adding top down mapping adds only one unique case (without
1907 	 * it, there would be four cases).
1908 	 */
1909 
1910 	if ((flags & UVM_FLAG_FIXED) == 0 &&
1911 	    hint == (topdown ? vm_map_max(map) : vm_map_min(map))) {
1912 		/*
1913 		 * The uvm_map_findspace algorithm is monotonic -- for
1914 		 * topdown VM it starts with a high hint and returns a
1915 		 * lower free address; for !topdown VM it starts with a
1916 		 * low hint and returns a higher free address.  As an
1917 		 * optimization, start with the first (highest for
1918 		 * topdown, lowest for !topdown) free address.
1919 		 *
1920 		 * XXX This `optimization' probably doesn't actually do
1921 		 * much in practice unless userland explicitly passes
1922 		 * the VM map's minimum or maximum address, which
1923 		 * varies from machine to machine (VM_MAX/MIN_ADDRESS,
1924 		 * e.g. 0x7fbfdfeff000 on amd64 but 0xfffffffff000 on
1925 		 * aarch64) and may vary according to other factors
1926 		 * like sysctl vm.user_va0_disable.  In particular, if
1927 		 * the user specifies 0 as a hint to mmap, then mmap
1928 		 * will choose a default address which is usually _not_
1929 		 * VM_MAX/MIN_ADDRESS but something else instead like
1930 		 * VM_MAX_ADDRESS - stack size - guard page overhead,
1931 		 * in which case this branch is never hit.
1932 		 *
1933 		 * In fact, this branch appears to have been broken for
1934 		 * two decades between when topdown was introduced in
1935 		 * ~2003 and when it was adapted to handle the topdown
1936 		 * case without violating the monotonicity assertion in
1937 		 * 2022.  Maybe Someone^TM should either ditch the
1938 		 * optimization or find a better way to do it.
1939 		 */
1940 		entry = map->first_free;
1941 	} else {
1942 		if (uvm_map_lookup_entry(map, hint, &entry)) {
1943 			/* "hint" address already in use ... */
1944 			if (flags & UVM_FLAG_FIXED) {
1945 				UVMHIST_LOG(maphist, "<- fixed & VA in use",
1946 				    0, 0, 0, 0);
1947 				return (NULL);
1948 			}
1949 			if (topdown)
1950 				/* Start from lower gap. */
1951 				entry = entry->prev;
1952 		} else if (flags & UVM_FLAG_FIXED) {
1953 			if (entry->next->start >= hint + length &&
1954 			    hint + length > hint)
1955 				goto found;
1956 
1957 			/* "hint" address is gap but too small */
1958 			UVMHIST_LOG(maphist, "<- fixed mapping failed",
1959 			    0, 0, 0, 0);
1960 			return (NULL); /* only one shot at it ... */
1961 		} else {
1962 			/*
1963 			 * See if given hint fits in this gap.
1964 			 */
1965 			avail = uvm_map_space_avail(&hint, length,
1966 			    uoffset, align, flags, topdown, entry);
1967 			INVARIANTS();
1968 			switch (avail) {
1969 			case 1:
1970 				goto found;
1971 			case -1:
1972 				goto wraparound;
1973 			}
1974 
1975 			if (topdown) {
1976 				/*
1977 				 * Still there is a chance to fit
1978 				 * if hint > entry->end.
1979 				 */
1980 			} else {
1981 				/* Start from higher gap. */
1982 				entry = entry->next;
1983 				if (entry == &map->header)
1984 					goto notfound;
1985 				goto nextgap;
1986 			}
1987 		}
1988 	}
1989 
1990 	/*
1991 	 * Note that the UVM_FLAG_FIXED case has already been handled.
1992 	 */
1993 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1994 
1995 	/* Try to find the space in the red-black tree */
1996 
1997 	/* Check slot before any entry */
1998 	if (topdown) {
1999 		KASSERTMSG(entry->next->start >= vm_map_min(map),
2000 		    "map=%p entry=%p entry->next=%p"
2001 		    " entry->next->start=0x%"PRIxVADDR" min=0x%"PRIxVADDR,
2002 		    map, entry, entry->next,
2003 		    entry->next->start, vm_map_min(map));
2004 		if (length > entry->next->start - vm_map_min(map))
2005 			hint = vm_map_min(map); /* XXX goto wraparound? */
2006 		else
2007 			hint = entry->next->start - length;
2008 		KASSERT(hint >= vm_map_min(map));
2009 	} else {
2010 		hint = entry->end;
2011 	}
2012 	INVARIANTS();
2013 	avail = uvm_map_space_avail(&hint, length, uoffset, align, flags,
2014 	    topdown, entry);
2015 	INVARIANTS();
2016 	switch (avail) {
2017 	case 1:
2018 		goto found;
2019 	case -1:
2020 		goto wraparound;
2021 	}
2022 
2023 nextgap:
2024 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
2025 	/* If there is not enough space in the whole tree, we fail */
2026 	tmp = ROOT_ENTRY(map);
2027 	if (tmp == NULL || tmp->maxgap < length)
2028 		goto notfound;
2029 
2030 	prev = NULL; /* previous candidate */
2031 
2032 	/* Find an entry close to hint that has enough space */
2033 	for (; tmp;) {
2034 		KASSERT(tmp->next->start == tmp->end + tmp->gap);
2035 		if (topdown) {
2036 			if (tmp->next->start < hint + length &&
2037 			    (prev == NULL || tmp->end > prev->end)) {
2038 				if (tmp->gap >= length)
2039 					prev = tmp;
2040 				else if ((child = LEFT_ENTRY(tmp)) != NULL
2041 				    && child->maxgap >= length)
2042 					prev = tmp;
2043 			}
2044 		} else {
2045 			if (tmp->end >= hint &&
2046 			    (prev == NULL || tmp->end < prev->end)) {
2047 				if (tmp->gap >= length)
2048 					prev = tmp;
2049 				else if ((child = RIGHT_ENTRY(tmp)) != NULL
2050 				    && child->maxgap >= length)
2051 					prev = tmp;
2052 			}
2053 		}
2054 		if (tmp->next->start < hint + length)
2055 			child = RIGHT_ENTRY(tmp);
2056 		else if (tmp->end > hint)
2057 			child = LEFT_ENTRY(tmp);
2058 		else {
2059 			if (tmp->gap >= length)
2060 				break;
2061 			if (topdown)
2062 				child = LEFT_ENTRY(tmp);
2063 			else
2064 				child = RIGHT_ENTRY(tmp);
2065 		}
2066 		if (child == NULL || child->maxgap < length)
2067 			break;
2068 		tmp = child;
2069 	}
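
	/*
	 * Aside (not in the original source): this pruning relies on
	 * the augmented rbtree invariant maintained by uvm_rb_fixup(),
	 * conceptually
	 *
	 *	gap(e)    = e->next->start - e->end
	 *	maxgap(e) = MAX(gap(e), maxgap(left(e)), maxgap(right(e)))
	 *
	 * (cf. the KASSERT at the top of the loop), so any subtree
	 * whose root has maxgap < length can be skipped outright.
	 */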
2070 
2071 	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
2072 		/*
2073 		 * Check if the entry that we found satisfies the
2074 		 * space requirement
2075 		 */
2076 		if (topdown) {
2077 			if (hint > tmp->next->start - length)
2078 				hint = tmp->next->start - length;
2079 		} else {
2080 			if (hint < tmp->end)
2081 				hint = tmp->end;
2082 		}
2083 		INVARIANTS();
2084 		avail = uvm_map_space_avail(&hint, length, uoffset, align,
2085 		    flags, topdown, tmp);
2086 		INVARIANTS();
2087 		switch (avail) {
2088 		case 1:
2089 			entry = tmp;
2090 			goto found;
2091 		case -1:
2092 			goto wraparound;
2093 		}
2094 		if (tmp->gap >= length)
2095 			goto listsearch;
2096 	}
2097 	if (prev == NULL)
2098 		goto notfound;
2099 
2100 	if (topdown) {
2101 		KASSERT(orig_hint >= prev->next->start - length ||
2102 		    prev->next->start - length > prev->next->start);
2103 		hint = prev->next->start - length;
2104 	} else {
2105 		KASSERT(orig_hint <= prev->end);
2106 		hint = prev->end;
2107 	}
2108 	INVARIANTS();
2109 	avail = uvm_map_space_avail(&hint, length, uoffset, align,
2110 	    flags, topdown, prev);
2111 	INVARIANTS();
2112 	switch (avail) {
2113 	case 1:
2114 		entry = prev;
2115 		goto found;
2116 	case -1:
2117 		goto wraparound;
2118 	}
2119 	if (prev->gap >= length)
2120 		goto listsearch;
2121 
2122 	if (topdown)
2123 		tmp = LEFT_ENTRY(prev);
2124 	else
2125 		tmp = RIGHT_ENTRY(prev);
2126 	for (;;) {
2127 		KASSERT(tmp);
2128 		KASSERTMSG(tmp->maxgap >= length,
2129 		    "tmp->maxgap=0x%"PRIxVSIZE" length=0x%"PRIxVSIZE,
2130 		    tmp->maxgap, length);
2131 		if (topdown)
2132 			child = RIGHT_ENTRY(tmp);
2133 		else
2134 			child = LEFT_ENTRY(tmp);
2135 		if (child && child->maxgap >= length) {
2136 			tmp = child;
2137 			continue;
2138 		}
2139 		if (tmp->gap >= length)
2140 			break;
2141 		if (topdown)
2142 			tmp = LEFT_ENTRY(tmp);
2143 		else
2144 			tmp = RIGHT_ENTRY(tmp);
2145 	}
2146 
2147 	if (topdown) {
2148 		KASSERT(orig_hint >= tmp->next->start - length ||
2149 		    tmp->next->start - length > tmp->next->start);
2150 		hint = tmp->next->start - length;
2151 	} else {
2152 		KASSERT(orig_hint <= tmp->end);
2153 		hint = tmp->end;
2154 	}
2155 	INVARIANTS();
2156 	avail = uvm_map_space_avail(&hint, length, uoffset, align,
2157 	    flags, topdown, tmp);
2158 	INVARIANTS();
2159 	switch (avail) {
2160 	case 1:
2161 		entry = tmp;
2162 		goto found;
2163 	case -1:
2164 		goto wraparound;
2165 	}
2166 
2167 	/*
2168 	 * The tree fails to find an entry because of offset or alignment
2169 	 * restrictions.  Search the list instead.
2170 	 */
2171  listsearch:
2172 	/*
2173 	 * Look through the rest of the map, trying to fit a new region in
2174 	 * the gap between existing regions, or after the very last region.
2175 	 * note: entry->end = base VA of current gap,
2176 	 *	 entry->next->start = VA of end of current gap
2177 	 */
2178 
2179 	INVARIANTS();
2180 	for (;;) {
2181 		/* Update hint for current gap. */
2182 		hint = topdown ? entry->next->start - length : entry->end;
2183 		INVARIANTS();
2184 
2185 		/* See if it fits. */
2186 		avail = uvm_map_space_avail(&hint, length, uoffset, align,
2187 		    flags, topdown, entry);
2188 		INVARIANTS();
2189 		switch (avail) {
2190 		case 1:
2191 			goto found;
2192 		case -1:
2193 			goto wraparound;
2194 		}
2195 
2196 		/* Advance to next/previous gap */
2197 		if (topdown) {
2198 			if (entry == &map->header) {
2199 				UVMHIST_LOG(maphist, "<- failed (off start)",
2200 				    0,0,0,0);
2201 				goto notfound;
2202 			}
2203 			entry = entry->prev;
2204 		} else {
2205 			entry = entry->next;
2206 			if (entry == &map->header) {
2207 				UVMHIST_LOG(maphist, "<- failed (off end)",
2208 				    0,0,0,0);
2209 				goto notfound;
2210 			}
2211 		}
2212 	}
2213 
2214  found:
2215 	SAVE_HINT(map, map->hint, entry);
2216 	*result = hint;
2217 	UVMHIST_LOG(maphist,"<- got it!  (result=%#jx)", hint, 0,0,0);
2218 	INVARIANTS();
2219 	KASSERT(entry->end <= hint);
2220 	KASSERT(hint + length <= entry->next->start);
2221 	return (entry);
2222 
2223  wraparound:
2224 	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2225 
2226 	return (NULL);
2227 
2228  notfound:
2229 	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2230 
2231 	return (NULL);
2232 #undef INVARIANTS
2233 }
2234 
2235 /*
2236  *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
2237  */
2238 
2239 /*
2240  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2241  *
2242  * => caller must check alignment and size
2243  * => map must be locked by caller
2244  * => we return a list of map entries that we've removed from the map
2245  *    in "entry_list"
2246  */
2247 
2248 void
2249 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2250     struct vm_map_entry **entry_list /* OUT */, int flags)
2251 {
2252 	struct vm_map_entry *entry, *first_entry, *next;
2253 	vaddr_t len;
2254 	UVMHIST_FUNC(__func__);
2255 	UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
2256 	    (uintptr_t)map, start, end, 0);
2257 	VM_MAP_RANGE_CHECK(map, start, end);
2258 
2259 	uvm_map_check(map, "unmap_remove entry");
2260 
2261 	/*
2262 	 * find first entry
2263 	 */
2264 
2265 	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2266 		/* clip and go... */
2267 		entry = first_entry;
2268 		UVM_MAP_CLIP_START(map, entry, start);
2269 		/* critical!  prevents stale hint */
2270 		SAVE_HINT(map, entry, entry->prev);
2271 	} else {
2272 		entry = first_entry->next;
2273 	}
2274 
2275 	/*
2276 	 * save the free space hint
2277 	 */
2278 
2279 	if (map->first_free != &map->header && map->first_free->start >= start)
2280 		map->first_free = entry->prev;
2281 
2282 	/*
2283 	 * note: we now re-use first_entry for a different task.  we remove
2284 	 * a number of map entries from the map and save them in a linked
2285 	 * list headed by "first_entry".  once we remove them from the map
2286 	 * the caller should unlock the map and drop the references to the
2287 	 * backing objects [c.f. uvm_unmap_detach].  the object is to
2288 	 * backing objects [cf. uvm_unmap_detach].  the objective is to
2289 	 *   [1] the map has to be locked for unmapping
2290 	 *   [2] the map need not be locked for reference dropping
2291 	 *   [3] dropping references may trigger pager I/O, and if we hit
2292 	 *       a pager that does synchronous I/O we may have to wait for it.
2293 	 *   [4] we would like all waiting for I/O to occur with maps unlocked
2294 	 *       so that we don't block other threads.
2295 	 */
2296 
2297 	first_entry = NULL;
2298 	*entry_list = NULL;
2299 
2300 	/*
2301 	 * break up the area into map entry sized regions and unmap.  note
2302 	 * that all mappings have to be removed before we can even consider
2303 	 * dropping references to amaps or VM objects (otherwise we could end
2304 	 * up with a mapping to a page on the free list which would be very bad)
2305 	 */
2306 
2307 	while ((entry != &map->header) && (entry->start < end)) {
2308 		KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2309 
2310 		UVM_MAP_CLIP_END(map, entry, end);
2311 		next = entry->next;
2312 		len = entry->end - entry->start;
2313 
2314 		/*
2315 		 * unwire before removing addresses from the pmap; otherwise
2316 		 * unwiring will put the entries back into the pmap (XXX).
2317 		 */
2318 
2319 		if (VM_MAPENT_ISWIRED(entry)) {
2320 			uvm_map_entry_unwire(map, entry);
2321 		}
2322 		if (flags & UVM_FLAG_VAONLY) {
2323 
2324 			/* nothing */
2325 
2326 		} else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2327 
2328 			/*
2329 			 * if the map is non-pageable, any pages mapped there
2330 			 * must be wired and entered with pmap_kenter_pa(),
2331 			 * and we should free any such pages immediately.
2332 			 * this is mostly used for kmem_map.
2333 			 */
2334 			KASSERT(vm_map_pmap(map) == pmap_kernel());
2335 
2336 			uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2337 		} else if (UVM_ET_ISOBJ(entry) &&
2338 			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2339 			panic("%s: kernel object %p %p\n",
2340 			    __func__, map, entry);
2341 		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2342 			/*
2343 			 * remove mappings the standard way.  lock object
2344 			 * and/or amap to ensure vm_page state does not
2345 			 * change while in pmap_remove().
2346 			 */
2347 
2348 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
2349 			uvm_map_lock_entry(entry, RW_WRITER);
2350 #else
2351 			uvm_map_lock_entry(entry, RW_READER);
2352 #endif
2353 			pmap_remove(map->pmap, entry->start, entry->end);
2354 
2355 			/*
2356 			 * note: if map is dying, leave pmap_update() for
2357 			 * later.  if the map is to be reused (exec) then
2358 			 * pmap_update() will be called.  if the map is
2359 			 * being disposed of (exit) then pmap_destroy()
2360 			 * will be called.
2361 			 */
2362 
2363 			if ((map->flags & VM_MAP_DYING) == 0) {
2364 				pmap_update(vm_map_pmap(map));
2365 			} else {
2366 				KASSERT(vm_map_pmap(map) != pmap_kernel());
2367 			}
2368 
2369 			uvm_map_unlock_entry(entry);
2370 		}
2371 
2372 #if defined(UVMDEBUG)
2373 		/*
2374 		 * check if there are any remaining mappings,
2375 		 * which would be a bug in the caller.
2376 		 */
2377 
2378 		vaddr_t va;
2379 		for (va = entry->start; va < entry->end;
2380 		    va += PAGE_SIZE) {
2381 			if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2382 				panic("%s: %#"PRIxVADDR" has mapping",
2383 				    __func__, va);
2384 			}
2385 		}
2386 
2387 		if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2388 			uvm_km_check_empty(map, entry->start, entry->end);
2389 		}
2390 #endif /* defined(UVMDEBUG) */
2391 
2392 		/*
2393 		 * remove entry from map and put it on our list of entries
2394 		 * that we've nuked.  then go to next entry.
2395 		 */
2396 
2397 		UVMHIST_LOG(maphist, "  removed map entry %#jx",
2398 		    (uintptr_t)entry, 0, 0, 0);
2399 
2400 		/* critical!  prevents stale hint */
2401 		SAVE_HINT(map, entry, entry->prev);
2402 
2403 		uvm_map_entry_unlink(map, entry);
2404 		KASSERT(map->size >= len);
2405 		map->size -= len;
2406 		entry->prev = NULL;
2407 		entry->next = first_entry;
2408 		first_entry = entry;
2409 		entry = next;
2410 	}
2411 
2412 	uvm_map_check(map, "unmap_remove leave");
2413 
2414 	/*
2415 	 * now we've cleaned up the map and are ready for the caller to drop
2416 	 * references to the mapped objects.
2417 	 */
2418 
2419 	*entry_list = first_entry;
2420 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2421 
2422 	if (map->flags & VM_MAP_WANTVA) {
2423 		mutex_enter(&map->misc_lock);
2424 		map->flags &= ~VM_MAP_WANTVA;
2425 		cv_broadcast(&map->cv);
2426 		mutex_exit(&map->misc_lock);
2427 	}
2428 }
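
/*
 * The usual caller pattern for the unmap/detach split described above
 * is (sketch; cf. uvm_unmap1(), later in this file):
 *
 *	struct vm_map_entry *dead;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead, 0);
 *	vm_map_unlock(map);
 *	if (dead != NULL)
 *		uvm_unmap_detach(dead, 0);
 *
 * i.e. the map lock is held only across the entry removal, and the
 * possibly-blocking reference drops happen afterwards, unlocked.
 */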
2429 
2430 /*
2431  * uvm_unmap_detach: drop references in a chain of map entries
2432  *
2433  * => we will free the map entries as we traverse the list.
2434  */
2435 
2436 void
2437 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2438 {
2439 	struct vm_map_entry *next_entry;
2440 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2441 
2442 	while (first_entry) {
2443 		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2444 		UVMHIST_LOG(maphist,
2445 		    "  detach %#jx: amap=%#jx, obj=%#jx, submap?=%jd",
2446 		    (uintptr_t)first_entry,
2447 		    (uintptr_t)first_entry->aref.ar_amap,
2448 		    (uintptr_t)first_entry->object.uvm_obj,
2449 		    UVM_ET_ISSUBMAP(first_entry));
2450 
2451 		/*
2452 		 * drop reference to amap, if we've got one
2453 		 */
2454 
2455 		if (first_entry->aref.ar_amap)
2456 			uvm_map_unreference_amap(first_entry, flags);
2457 
2458 		/*
2459 		 * drop reference to our backing object, if we've got one
2460 		 */
2461 
2462 		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2463 		if (UVM_ET_ISOBJ(first_entry) &&
2464 		    first_entry->object.uvm_obj->pgops->pgo_detach) {
2465 			(*first_entry->object.uvm_obj->pgops->pgo_detach)
2466 				(first_entry->object.uvm_obj);
2467 		}
2468 		next_entry = first_entry->next;
2469 		uvm_mapent_free(first_entry);
2470 		first_entry = next_entry;
2471 	}
2472 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2473 }
2474 
2475 /*
2476  *   E X T R A C T I O N   F U N C T I O N S
2477  */
2478 
2479 /*
2480  * uvm_map_reserve: reserve space in a vm_map for future use.
2481  *
2482  * => we reserve space in a map by putting a dummy map entry in the
2483  *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2484  * => map should be unlocked (we will write lock it)
2485  * => we return true if we were able to reserve space
2486  * => XXXCDC: should be inline?
2487  */
2488 
2489 int
2490 uvm_map_reserve(struct vm_map *map, vsize_t size,
2491     vaddr_t offset	/* hint for pmap_prefer */,
2492     vsize_t align	/* alignment */,
2493     vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
2494     uvm_flag_t flags	/* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
2495 {
2496 	UVMHIST_FUNC(__func__);
2497 	UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
2498 	    (uintptr_t)map, size, offset, (uintptr_t)raddr);
2499 
2500 	size = round_page(size);
2501 
2502 	/*
2503 	 * reserve some virtual space.
2504 	 */
2505 
2506 	if (uvm_map(map, raddr, size, NULL, offset, align,
2507 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2508 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2509 		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2510 		return (false);
2511 	}
2512 
2513 	UVMHIST_LOG(maphist, "<- done (*raddr=%#jx)", *raddr,0,0,0);
2514 	return (true);
2515 }
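
/*
 * Example use, modeled on step 1 of uvm_map_extract() below (variable
 * names illustrative):
 *
 *	vaddr_t dstaddr = vm_map_min(dstmap);
 *
 *	if (!uvm_map_reserve(dstmap, len, start,
 *	    atop(start) & uvmexp.colormask, &dstaddr,
 *	    UVM_FLAG_COLORMATCH))
 *		return ENOMEM;
 *
 * On success, dstaddr names a blank PROT_NONE entry of len bytes that
 * can later be filled in with uvm_map_replace().
 */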
2516 
2517 /*
2518  * uvm_map_replace: replace a reserved (blank) area of memory with
2519  * real mappings.
2520  *
2521  * => caller must WRITE-LOCK the map
2522  * => we return true if replacement was a success
2523  * => we expect the newents chain to have nnewents entries on it and
2524  *    we expect newents->prev to point to the last entry on the list
2525  * => note newents is allowed to be NULL
2526  */
2527 
2528 static int
2529 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2530     struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2531     struct vm_map_entry **oldentryp)
2532 {
2533 	struct vm_map_entry *oldent, *last;
2534 
2535 	uvm_map_check(map, "map_replace entry");
2536 
2537 	/*
2538 	 * first find the blank map entry at the specified address
2539 	 */
2540 
2541 	if (!uvm_map_lookup_entry(map, start, &oldent)) {
2542 		return (false);
2543 	}
2544 
2545 	/*
2546 	 * check to make sure we have a proper blank entry
2547 	 */
2548 
2549 	if (end < oldent->end) {
2550 		UVM_MAP_CLIP_END(map, oldent, end);
2551 	}
2552 	if (oldent->start != start || oldent->end != end ||
2553 	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2554 		return (false);
2555 	}
2556 
2557 #ifdef DIAGNOSTIC
2558 
2559 	/*
2560 	 * sanity check the newents chain
2561 	 */
2562 
2563 	{
2564 		struct vm_map_entry *tmpent = newents;
2565 		int nent = 0;
2566 		vsize_t sz = 0;
2567 		vaddr_t cur = start;
2568 
2569 		while (tmpent) {
2570 			nent++;
2571 			sz += tmpent->end - tmpent->start;
2572 			if (tmpent->start < cur)
2573 				panic("uvm_map_replace1");
2574 			if (tmpent->start >= tmpent->end || tmpent->end > end) {
2575 				panic("uvm_map_replace2: "
2576 				    "tmpent->start=%#"PRIxVADDR
2577 				    ", tmpent->end=%#"PRIxVADDR
2578 				    ", end=%#"PRIxVADDR,
2579 				    tmpent->start, tmpent->end, end);
2580 			}
2581 			cur = tmpent->end;
2582 			if (tmpent->next) {
2583 				if (tmpent->next->prev != tmpent)
2584 					panic("uvm_map_replace3");
2585 			} else {
2586 				if (newents->prev != tmpent)
2587 					panic("uvm_map_replace4");
2588 			}
2589 			tmpent = tmpent->next;
2590 		}
2591 		if (nent != nnewents)
2592 			panic("uvm_map_replace5");
2593 		if (sz != nsize)
2594 			panic("uvm_map_replace6");
2595 	}
2596 #endif
2597 
2598 	/*
2599 	 * map entry is a valid blank!   replace it.   (this does all the
2600 	 * work of map entry link/unlink...).
2601 	 */
2602 
2603 	if (newents) {
2604 		last = newents->prev;
2605 
2606 		/* critical: flush stale hints out of map */
2607 		SAVE_HINT(map, map->hint, newents);
2608 		if (map->first_free == oldent)
2609 			map->first_free = last;
2610 
2611 		last->next = oldent->next;
2612 		last->next->prev = last;
2613 
2614 		/* Fix RB tree */
2615 		uvm_rb_remove(map, oldent);
2616 
2617 		newents->prev = oldent->prev;
2618 		newents->prev->next = newents;
2619 		map->nentries = map->nentries + (nnewents - 1);
2620 
2621 		/* Fixup the RB tree */
2622 		{
2623 			int i;
2624 			struct vm_map_entry *tmp;
2625 
2626 			tmp = newents;
2627 			for (i = 0; i < nnewents && tmp; i++) {
2628 				uvm_rb_insert(map, tmp);
2629 				tmp = tmp->next;
2630 			}
2631 		}
2632 	} else {
2633 		/* NULL list of new entries: just remove the old one */
2634 		clear_hints(map, oldent);
2635 		uvm_map_entry_unlink(map, oldent);
2636 	}
2637 	map->size -= end - start - nsize;
2638 
2639 	uvm_map_check(map, "map_replace leave");
2640 
2641 	/*
2642 	 * now we can free the old blank entry and return.
2643 	 */
2644 
2645 	*oldentryp = oldent;
2646 	return (true);
2647 }
2648 
2649 /*
2650  * uvm_map_extract: extract a mapping from a map and put it somewhere
2651  *	(maybe removing the old mapping)
2652  *
2653  * => maps should be unlocked (we will write lock them)
2654  * => returns 0 on success, error code otherwise
2655  * => start must be page aligned
2656  * => len must be page sized
2657  * => flags:
2658  *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
2659  *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2660  *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2661  *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2662  *      UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go
2663  *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2664  *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2665  *             be used from within the kernel in a kernel level map <<<
2666  */
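
/*
 * Typical use (sketch; cf. uvm_io(), which temporarily maps part of
 * another process' address space into kernel_map):
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, sva, len,
 *	    kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error == 0) {
 *		... access [kva, kva + len) ...
 *		uvm_unmap(kernel_map, kva, kva + len);
 *	}
 */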
2667 
2668 int
2669 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2670     struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2671 {
2672 	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2673 	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2674 	    *deadentry, *oldentry;
2675 	struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2676 	vsize_t elen __unused;
2677 	int nchain, error, copy_ok;
2678 	vsize_t nsize;
2679 	UVMHIST_FUNC(__func__);
2680 	UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
2681 	    (uintptr_t)srcmap, start, len, 0);
2682 	UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)",
2683 	    (uintptr_t)dstmap, flags, 0, 0);
2684 
2685 	/*
2686 	 * step 0: sanity check: start must be on a page boundary, length
2687 	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
2688 	 * REMOVE.
2689 	 */
2690 
2691 	KASSERTMSG((start & PAGE_MASK) == 0, "start=0x%"PRIxVADDR, start);
2692 	KASSERTMSG((len & PAGE_MASK) == 0, "len=0x%"PRIxVADDR, len);
2693 	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2694 		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2695 
2696 	/*
2697 	 * step 1: reserve space in the target map for the extracted area
2698 	 */
2699 
2700 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2701 		dstaddr = vm_map_min(dstmap);
2702 		if (!uvm_map_reserve(dstmap, len, start,
2703 		    atop(start) & uvmexp.colormask, &dstaddr,
2704 		    UVM_FLAG_COLORMATCH))
2705 			return (ENOMEM);
2706 		KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
2707 		*dstaddrp = dstaddr;	/* pass address back to caller */
2708 		UVMHIST_LOG(maphist, "  dstaddr=%#jx", dstaddr,0,0,0);
2709 	} else {
2710 		dstaddr = *dstaddrp;
2711 	}
2712 
2713 	/*
2714 	 * step 2: setup for the extraction process loop by init'ing the
2715 	 * map entry chain, locking src map, and looking up the first useful
2716 	 * entry in the map.
2717 	 */
2718 
2719 	end = start + len;
2720 	newend = dstaddr + len;
2721 	chain = endchain = NULL;
2722 	nchain = 0;
2723 	nsize = 0;
2724 	vm_map_lock(srcmap);
2725 
2726 	if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2727 
2728 		/* "start" is within an entry */
2729 		if (flags & UVM_EXTRACT_QREF) {
2730 
2731 			/*
2732 			 * for quick references we don't clip the entry, so
2733 			 * the entry may map space "before" the starting
2734 			 * virtual address... this is the "fudge" factor
2735 			 * (which can be non-zero only the first time
2736 			 * through the "while" loop in step 3).
2737 			 */
2738 
2739 			fudge = start - entry->start;
2740 		} else {
2741 
2742 			/*
2743 			 * normal reference: we clip the map to fit (thus
2744 			 * fudge is zero)
2745 			 */
2746 
2747 			UVM_MAP_CLIP_START(srcmap, entry, start);
2748 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2749 			fudge = 0;
2750 		}
2751 	} else {
2752 
2753 		/* "start" is not within an entry ... skip to next entry */
2754 		if (flags & UVM_EXTRACT_CONTIG) {
2755 			error = EINVAL;
2756 			goto bad;    /* definite hole here ... */
2757 		}
2758 
2759 		entry = entry->next;
2760 		fudge = 0;
2761 	}
2762 
2763 	/* save values from srcmap for step 6 */
2764 	orig_entry = entry;
2765 	orig_fudge = fudge;
2766 
2767 	/*
2768 	 * step 3: now start looping through the map entries, extracting
2769 	 * as we go.
2770 	 */
2771 
2772 	while (entry->start < end && entry != &srcmap->header) {
2773 
2774 		/* if we are not doing a quick reference, clip it */
2775 		if ((flags & UVM_EXTRACT_QREF) == 0)
2776 			UVM_MAP_CLIP_END(srcmap, entry, end);
2777 
2778 		/* clear needs_copy (allow chunking) */
2779 		if (UVM_ET_ISNEEDSCOPY(entry)) {
2780 			amap_copy(srcmap, entry,
2781 			    AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2782 			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
2783 				error = ENOMEM;
2784 				goto bad;
2785 			}
2786 
2787 			/* amap_copy could clip (during chunk)!  update fudge */
2788 			if (fudge) {
2789 				fudge = start - entry->start;
2790 				orig_fudge = fudge;
2791 			}
2792 		}
2793 
2794 		/* calculate the offset of this from "start" */
2795 		oldoffset = (entry->start + fudge) - start;
2796 
2797 		/* allocate a new map entry */
2798 		newentry = uvm_mapent_alloc(dstmap, 0);
2799 		if (newentry == NULL) {
2800 			error = ENOMEM;
2801 			goto bad;
2802 		}
2803 
2804 		/* set up new map entry */
2805 		newentry->next = NULL;
2806 		newentry->prev = endchain;
2807 		newentry->start = dstaddr + oldoffset;
2808 		newentry->end =
2809 		    newentry->start + (entry->end - (entry->start + fudge));
2810 		if (newentry->end > newend || newentry->end < newentry->start)
2811 			newentry->end = newend;
2812 		newentry->object.uvm_obj = entry->object.uvm_obj;
2813 		if (newentry->object.uvm_obj) {
2814 			if (newentry->object.uvm_obj->pgops->pgo_reference)
2815 				newentry->object.uvm_obj->pgops->
2816 				    pgo_reference(newentry->object.uvm_obj);
2817 			newentry->offset = entry->offset + fudge;
2818 		} else {
2819 			newentry->offset = 0;
2820 		}
2821 		newentry->etype = entry->etype;
2822 		if (flags & UVM_EXTRACT_PROT_ALL) {
2823 			newentry->protection = newentry->max_protection =
2824 			    UVM_PROT_ALL;
2825 		} else {
2826 			newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2827 			    entry->max_protection : entry->protection;
2828 			newentry->max_protection = entry->max_protection;
2829 		}
2830 		newentry->inheritance = entry->inheritance;
2831 		newentry->wired_count = 0;
2832 		newentry->aref.ar_amap = entry->aref.ar_amap;
2833 		if (newentry->aref.ar_amap) {
2834 			newentry->aref.ar_pageoff =
2835 			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2836 			uvm_map_reference_amap(newentry, AMAP_SHARED |
2837 			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2838 		} else {
2839 			newentry->aref.ar_pageoff = 0;
2840 		}
2841 		newentry->advice = entry->advice;
2842 		if ((flags & UVM_EXTRACT_QREF) != 0) {
2843 			newentry->flags |= UVM_MAP_NOMERGE;
2844 		}
2845 
2846 		/* now link it on the chain */
2847 		nchain++;
2848 		nsize += newentry->end - newentry->start;
2849 		if (endchain == NULL) {
2850 			chain = endchain = newentry;
2851 		} else {
2852 			endchain->next = newentry;
2853 			endchain = newentry;
2854 		}
2855 
2856 		/* end of 'while' loop! */
2857 		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2858 		    (entry->next == &srcmap->header ||
2859 		    entry->next->start != entry->end)) {
2860 			error = EINVAL;
2861 			goto bad;
2862 		}
2863 		entry = entry->next;
2864 		fudge = 0;
2865 	}
2866 
2867 	/*
2868 	 * step 4: close off chain (in format expected by uvm_map_replace)
2869 	 */
2870 
2871 	if (chain)
2872 		chain->prev = endchain;
2873 
2874 	/*
2875 	 * step 5: attempt to lock the dest map so we can pmap_copy.
2876 	 * note usage of copy_ok:
2877 	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2878 	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2879 	 */
2880 
2881 	if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2882 		copy_ok = 1;
2883 		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2884 		    nchain, nsize, &resentry)) {
2885 			if (srcmap != dstmap)
2886 				vm_map_unlock(dstmap);
2887 			error = EIO;
2888 			goto bad;
2889 		}
2890 	} else {
2891 		copy_ok = 0;
2892 		/* replace deferred until step 7 */
2893 	}
2894 
2895 	/*
2896 	 * step 6: traverse the srcmap a second time to do the following:
2897 	 *  - if we got a lock on the dstmap do pmap_copy
2898 	 *  - if UVM_EXTRACT_REMOVE remove the entries
2899 	 * we make use of orig_entry and orig_fudge (saved in step 2)
2900 	 */
2901 
2902 	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2903 
2904 		/* purge possible stale hints from srcmap */
2905 		if (flags & UVM_EXTRACT_REMOVE) {
2906 			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2907 			if (srcmap->first_free != &srcmap->header &&
2908 			    srcmap->first_free->start >= start)
2909 				srcmap->first_free = orig_entry->prev;
2910 		}
2911 
2912 		entry = orig_entry;
2913 		fudge = orig_fudge;
2914 		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */
2915 
2916 		while (entry->start < end && entry != &srcmap->header) {
2917 			if (copy_ok) {
2918 				oldoffset = (entry->start + fudge) - start;
2919 				elen = MIN(end, entry->end) -
2920 				    (entry->start + fudge);
2921 				pmap_copy(dstmap->pmap, srcmap->pmap,
2922 				    dstaddr + oldoffset, elen,
2923 				    entry->start + fudge);
2924 			}
2925 
2926 			/* we advance "entry" in the following if statement */
2927 			if (flags & UVM_EXTRACT_REMOVE) {
2928 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
2929 				uvm_map_lock_entry(entry, RW_WRITER);
2930 #else
2931 				uvm_map_lock_entry(entry, RW_READER);
2932 #endif
2933 				pmap_remove(srcmap->pmap, entry->start,
2934 						entry->end);
2935 				uvm_map_unlock_entry(entry);
2936 				oldentry = entry;	/* save entry */
2937 				entry = entry->next;	/* advance */
2938 				uvm_map_entry_unlink(srcmap, oldentry);
2939 							/* add to dead list */
2940 				oldentry->next = deadentry;
2941 				deadentry = oldentry;
2942 			} else {
2943 				entry = entry->next;		/* advance */
2944 			}
2945 
2946 			/* end of 'while' loop */
2947 			fudge = 0;
2948 		}
2949 		pmap_update(srcmap->pmap);
2950 
2951 		/*
2952 		 * unlock dstmap.  we will dispose of deadentry in
2953 		 * step 7 if needed
2954 		 */
2955 
2956 		if (copy_ok && srcmap != dstmap)
2957 			vm_map_unlock(dstmap);
2958 
2959 	} else {
2960 		deadentry = NULL;
2961 	}
2962 
2963 	/*
2964 	 * step 7: we are done with the source map, unlock.   if copy_ok
2965 	 * is 0 then we have not replaced the dummy mapping in dstmap yet
2966 	 * and we need to do so now.
2967 	 */
2968 
2969 	vm_map_unlock(srcmap);
2970 	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2971 		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
2972 
2973 	/* now do the replacement if we didn't do it in step 5 */
2974 	if (copy_ok == 0) {
2975 		vm_map_lock(dstmap);
2976 		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2977 		    nchain, nsize, &resentry);
2978 		vm_map_unlock(dstmap);
2979 
2980 		if (error == false) {
2981 			error = EIO;
2982 			goto bad2;
2983 		}
2984 	}
2985 
2986 	if (resentry != NULL)
2987 		uvm_mapent_free(resentry);
2988 
2989 	return (0);
2990 
2991 	/*
2992 	 * bad: failure recovery
2993 	 */
2994 bad:
2995 	vm_map_unlock(srcmap);
2996 bad2:			/* src already unlocked */
2997 	if (chain)
2998 		uvm_unmap_detach(chain,
2999 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
3000 
3001 	if (resentry != NULL)
3002 		uvm_mapent_free(resentry);
3003 
3004 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
3005 		uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
3006 	}
3007 	return (error);
3008 }
3009 
3010 /* end of extraction functions */
3011 
3012 /*
3013  * uvm_map_submap: punch down part of a map into a submap
3014  *
3015  * => only the kernel_map is allowed to be submapped
3016  * => the purpose of submapping is to break up the locking granularity
3017  *	of a larger map
3018  * => the range specified must have been mapped previously with a uvm_map()
3019  *	call [with uobj==NULL] to create a blank map entry in the main map.
3020  *	[And it had better still be blank!]
3021  * => maps which contain submaps should never be copied or forked.
3022  * => to remove a submap, use uvm_unmap() on the main map
3023  *	and then uvm_map_deallocate() the submap.
3024  * => main map must be unlocked.
3025  * => submap must have been init'd and have a zero reference count.
3026  *	[need not be locked as we don't actually reference it]
3027  */
3028 
3029 int
3030 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
3031     struct vm_map *submap)
3032 {
3033 	struct vm_map_entry *entry;
3034 	int error;
3035 
3036 	vm_map_lock(map);
3037 	VM_MAP_RANGE_CHECK(map, start, end);
3038 
3039 	if (uvm_map_lookup_entry(map, start, &entry)) {
3040 		UVM_MAP_CLIP_START(map, entry, start);
3041 		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
3042 	} else {
3043 		entry = NULL;
3044 	}
3045 
3046 	if (entry != NULL &&
3047 	    entry->start == start && entry->end == end &&
3048 	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
3049 	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
3050 		entry->etype |= UVM_ET_SUBMAP;
3051 		entry->object.sub_map = submap;
3052 		entry->offset = 0;
3053 		uvm_map_reference(submap);
3054 		error = 0;
3055 	} else {
3056 		error = EINVAL;
3057 	}
3058 	vm_map_unlock(map);
3059 
3060 	return error;
3061 }
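
/*
 * In practice submaps are set up via uvm_km_suballoc(), which does
 * roughly the following (sketch only):
 *
 *	reserve a blank range in the parent map:
 *		uvm_map(map, &min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *		    UVM_ADV_RANDOM, mapflags));
 *	initialize the submap over [min, max), then:
 *		uvm_map_submap(map, min, max, submap);
 */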
3062 
3063 /*
3064  * uvm_map_protect_user: change map protection on behalf of the user.
3065  * Enforces PAX settings as necessary.
3066  */
3067 int
3068 uvm_map_protect_user(struct lwp *l, vaddr_t start, vaddr_t end,
3069     vm_prot_t new_prot)
3070 {
3071 	int error;
3072 
3073 	if ((error = PAX_MPROTECT_VALIDATE(l, new_prot)))
3074 		return error;
3075 
3076 	return uvm_map_protect(&l->l_proc->p_vmspace->vm_map, start, end,
3077 	    new_prot, false);
3078 }
3079 
3080 
3081 /*
3082  * uvm_map_protect: change map protection
3083  *
3084  * => set_max means set max_protection.
3085  * => map must be unlocked.
3086  */
3087 
3088 #define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
3089 			 ~VM_PROT_WRITE : VM_PROT_ALL)
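
/*
 * E.g. for a copy-on-write entry MASK() strips VM_PROT_WRITE, so the
 * pmap_protect() call below never grants write access to pages that
 * still need to be copied; the fault path grants write per-page once
 * the copy has actually been made.
 */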
3090 
3091 int
3092 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
3093     vm_prot_t new_prot, bool set_max)
3094 {
3095 	struct vm_map_entry *current, *entry;
3096 	int error = 0;
3097 	UVMHIST_FUNC(__func__);
3098 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
3099 	    (uintptr_t)map, start, end, new_prot);
3100 
3101 	vm_map_lock(map);
3102 	VM_MAP_RANGE_CHECK(map, start, end);
3103 	if (uvm_map_lookup_entry(map, start, &entry)) {
3104 		UVM_MAP_CLIP_START(map, entry, start);
3105 	} else {
3106 		entry = entry->next;
3107 	}
3108 
3109 	/*
3110 	 * make a first pass to check for protection violations.
3111 	 */
3112 
3113 	current = entry;
3114 	while ((current != &map->header) && (current->start < end)) {
3115 		if (UVM_ET_ISSUBMAP(current)) {
3116 			error = EINVAL;
3117 			goto out;
3118 		}
3119 		if ((new_prot & current->max_protection) != new_prot) {
3120 			error = EACCES;
3121 			goto out;
3122 		}
3123 		/*
3124 		 * Don't allow VM_PROT_EXECUTE to be set on entries that
3125 		 * point to vnodes that are associated with a NOEXEC file
3126 		 * system.
3127 		 */
3128 		if (UVM_ET_ISOBJ(current) &&
3129 		    UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3130 			struct vnode *vp =
3131 			    (struct vnode *) current->object.uvm_obj;
3132 
3133 			if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3134 			    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3135 				error = EACCES;
3136 				goto out;
3137 			}
3138 		}
3139 
3140 		current = current->next;
3141 	}
3142 
3143 	/* go back and fix up protections (clipping entry ends as we go). */
3144 
3145 	current = entry;
3146 	while ((current != &map->header) && (current->start < end)) {
3147 		vm_prot_t old_prot;
3148 
3149 		UVM_MAP_CLIP_END(map, current, end);
3150 		old_prot = current->protection;
3151 		if (set_max)
3152 			current->protection =
3153 			    (current->max_protection = new_prot) & old_prot;
3154 		else
3155 			current->protection = new_prot;
3156 
3157 		/*
3158 		 * update physical map if necessary.  worry about copy-on-write
3159 		 * here -- CHECK THIS XXX
3160 		 */
3161 
3162 		if (current->protection != old_prot) {
3163 			/* update pmap! */
3164 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
3165 			uvm_map_lock_entry(current, RW_WRITER);
3166 #else
3167 			uvm_map_lock_entry(current, RW_READER);
3168 #endif
3169 			pmap_protect(map->pmap, current->start, current->end,
3170 			    current->protection & MASK(current));
3171 			uvm_map_unlock_entry(current);
3172 
3173 			/*
3174 			 * If this entry points at a vnode, and the
3175 			 * protection includes VM_PROT_EXECUTE, mark
3176 			 * the vnode as VEXECMAP.
3177 			 */
3178 			if (UVM_ET_ISOBJ(current)) {
3179 				struct uvm_object *uobj =
3180 				    current->object.uvm_obj;
3181 
3182 				if (UVM_OBJ_IS_VNODE(uobj) &&
3183 				    (current->protection & VM_PROT_EXECUTE)) {
3184 					vn_markexec((struct vnode *) uobj);
3185 				}
3186 			}
3187 		}
3188 
3189 		/*
3190 		 * If the map is configured to lock any future mappings,
3191 		 * wire this entry now if the old protection was VM_PROT_NONE
3192 		 * and the new protection is not VM_PROT_NONE.
3193 		 */
3194 
3195 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3196 		    VM_MAPENT_ISWIRED(current) == 0 &&
3197 		    old_prot == VM_PROT_NONE &&
3198 		    new_prot != VM_PROT_NONE) {
3199 
3200 			/*
3201 			 * We must call pmap_update() here because the
3202 			 * pmap_protect() call above might have removed some
3203 			 * pmap entries and uvm_map_pageable() might create
3204 			 * some new pmap entries that rely on the prior
3205 			 * removals being completely finished.
3206 			 */
3207 
3208 			pmap_update(map->pmap);
3209 
3210 			if (uvm_map_pageable(map, current->start,
3211 			    current->end, false,
3212 			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3213 
3214 				/*
3215 				 * If locking the entry fails, remember the
3216 				 * error if it's the first one.  Note we
3217 				 * still continue setting the protection in
3218 				 * the map, but will return the error
3219 				 * condition regardless.
3220 				 *
3221 				 * XXX Ignore what the actual error is,
3222 				 * XXX just call it a resource shortage
3223 				 * XXX so that it doesn't get confused
3224 				 * XXX what uvm_map_protect() itself would
3225 				 * XXX normally return.
3226 				 */
3227 
3228 				error = ENOMEM;
3229 			}
3230 		}
3231 		current = current->next;
3232 	}
3233 	pmap_update(map->pmap);
3234 
3235  out:
3236 	vm_map_unlock(map);
3237 
3238 	UVMHIST_LOG(maphist, "<- done, error=%jd",error,0,0,0);
3239 	return error;
3240 }
3241 
3242 #undef  MASK
3243 
3244 /*
3245  * uvm_map_inherit: set inheritance code for range of addrs in map.
3246  *
3247  * => map must be unlocked
3248  * => note that the inherit code is used during a "fork".  see fork
3249  *	code for details.
3250  */
3251 
3252 int
3253 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3254     vm_inherit_t new_inheritance)
3255 {
3256 	struct vm_map_entry *entry, *temp_entry;
3257 	UVMHIST_FUNC(__func__);
3258 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
3259 	    (uintptr_t)map, start, end, new_inheritance);
3260 
3261 	switch (new_inheritance) {
3262 	case MAP_INHERIT_NONE:
3263 	case MAP_INHERIT_COPY:
3264 	case MAP_INHERIT_SHARE:
3265 	case MAP_INHERIT_ZERO:
3266 		break;
3267 	default:
3268 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3269 		return EINVAL;
3270 	}
3271 
3272 	vm_map_lock(map);
3273 	VM_MAP_RANGE_CHECK(map, start, end);
3274 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3275 		entry = temp_entry;
3276 		UVM_MAP_CLIP_START(map, entry, start);
3277 	}  else {
3278 		entry = temp_entry->next;
3279 	}
3280 	while ((entry != &map->header) && (entry->start < end)) {
3281 		UVM_MAP_CLIP_END(map, entry, end);
3282 		entry->inheritance = new_inheritance;
3283 		entry = entry->next;
3284 	}
3285 	vm_map_unlock(map);
3286 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3287 	return 0;
3288 }
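
/*
 * For example (illustrative): after
 *
 *	uvm_map_inherit(map, va, va + size, MAP_INHERIT_SHARE);
 *
 * a subsequent fork shares [va, va + size) between parent and child
 * instead of marking it copy-on-write; see uvmspace_fork() for how
 * each inheritance code is acted on.
 */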
3289 
3290 /*
3291  * uvm_map_advice: set advice code for range of addrs in map.
3292  *
3293  * => map must be unlocked
3294  */
3295 
3296 int
3297 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3298 {
3299 	struct vm_map_entry *entry, *temp_entry;
3300 	UVMHIST_FUNC(__func__);
3301 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
3302 	    (uintptr_t)map, start, end, new_advice);
3303 
3304 	vm_map_lock(map);
3305 	VM_MAP_RANGE_CHECK(map, start, end);
3306 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3307 		entry = temp_entry;
3308 		UVM_MAP_CLIP_START(map, entry, start);
3309 	} else {
3310 		entry = temp_entry->next;
3311 	}
3312 
3313 	/*
3314 	 * XXXJRT: disallow holes?
3315 	 */
3316 
3317 	while ((entry != &map->header) && (entry->start < end)) {
3318 		UVM_MAP_CLIP_END(map, entry, end);
3319 
3320 		switch (new_advice) {
3321 		case MADV_NORMAL:
3322 		case MADV_RANDOM:
3323 		case MADV_SEQUENTIAL:
3324 			/* nothing special here */
3325 			break;
3326 
3327 		default:
3328 			vm_map_unlock(map);
3329 			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3330 			return EINVAL;
3331 		}
3332 		entry->advice = new_advice;
3333 		entry = entry->next;
3334 	}
3335 
3336 	vm_map_unlock(map);
3337 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3338 	return 0;
3339 }
3340 
3341 /*
3342  * uvm_map_willneed: apply MADV_WILLNEED
3343  */
3344 
3345 int
3346 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3347 {
3348 	struct vm_map_entry *entry;
3349 	UVMHIST_FUNC(__func__);
3350 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
3351 	    (uintptr_t)map, start, end, 0);
3352 
3353 	vm_map_lock_read(map);
3354 	VM_MAP_RANGE_CHECK(map, start, end);
3355 	if (!uvm_map_lookup_entry(map, start, &entry)) {
3356 		entry = entry->next;
3357 	}
3358 	while (entry->start < end) {
3359 		struct vm_amap * const amap = entry->aref.ar_amap;
3360 		struct uvm_object * const uobj = entry->object.uvm_obj;
3361 
3362 		KASSERT(entry != &map->header);
3363 		KASSERT(start < entry->end);
3364 		/*
3365 		 * For now, we handle only the easy but commonly-requested case.
3366 		 * ie. start prefetching of backing uobj pages.
3367 		 *
3368 		 * XXX It might be useful to pmap_enter() the already-in-core
3369 		 * pages by inventing a "weak" mode for uvm_fault() which would
3370 		 * only do the PGO_LOCKED pgo_get().
3371 		 */
3372 		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3373 			off_t offset;
3374 			off_t size;
3375 
3376 			offset = entry->offset;
3377 			if (start > entry->start) {
3378 				offset += start - entry->start;
3379 			}
3380 			size = entry->end - MAX(entry->start, start);
3381 			if (entry->end > end) {
3382 				size -= entry->end - end;
3383 			}
3384 			uvm_readahead(uobj, offset, size);
3385 		}
3386 		entry = entry->next;
3387 	}
3388 	vm_map_unlock_read(map);
3389 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3390 	return 0;
3391 }
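
/*
 * This is reached from madvise(2) (cf. sys_madvise()); a userland
 * sketch:
 *
 *	if (madvise(addr, len, MADV_WILLNEED) == -1)
 *		err(1, "madvise");
 *
 * which ultimately triggers read-ahead on the uvm_objects backing
 * the mapped range, as above.
 */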
3392 
3393 /*
3394  * uvm_map_pageable: sets the pageability of a range in a map.
3395  *
3396  * => wires map entries.  should not be used for transient page locking.
3397  *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3398  * => regions specified as not pageable require lock-down (wired) memory
3399  *	and page tables.
3400  * => map must never be read-locked
3401  * => if islocked is true, map is already write-locked
3402  * => we always unlock the map, since we must downgrade to a read-lock
3403  *	to call uvm_fault_wire()
3404  * => XXXCDC: check this and try and clean it up.
3405  */
3406 
3407 int
3408 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3409     bool new_pageable, int lockflags)
3410 {
3411 	struct vm_map_entry *entry, *start_entry, *failed_entry;
3412 	int rv;
3413 #ifdef DIAGNOSTIC
3414 	u_int timestamp_save;
3415 #endif
3416 	UVMHIST_FUNC(__func__);
3417 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
3418 	    (uintptr_t)map, start, end, new_pageable);
3419 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3420 
3421 	if ((lockflags & UVM_LK_ENTER) == 0)
3422 		vm_map_lock(map);
3423 	VM_MAP_RANGE_CHECK(map, start, end);
3424 
3425 	/*
3426 	 * only one pageability change may take place at one time, since
3427 	 * uvm_fault_wire assumes it will be called only once for each
3428 	 * wiring/unwiring.  therefore, we have to make sure we're actually
3429 	 * changing the pageability for the entire region.  we do so before
3430 	 * making any changes.
3431 	 */
3432 
3433 	if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3434 		if ((lockflags & UVM_LK_EXIT) == 0)
3435 			vm_map_unlock(map);
3436 
3437 		UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3438 		return EFAULT;
3439 	}
3440 	entry = start_entry;
3441 
3442 	if (start == end) {		/* nothing required */
3443 		if ((lockflags & UVM_LK_EXIT) == 0)
3444 			vm_map_unlock(map);
3445 
3446 		UVMHIST_LOG(maphist,"<- done (nothing)",0,0,0,0);
3447 		return 0;
3448 	}
3449 
3450 	/*
3451 	 * handle wiring and unwiring separately.
3452 	 */
3453 
3454 	if (new_pageable) {		/* unwire */
3455 		UVM_MAP_CLIP_START(map, entry, start);
3456 
3457 		/*
3458 		 * unwiring.  first ensure that the range to be unwired is
3459 		 * really wired down and that there are no holes.
3460 		 */
3461 
3462 		while ((entry != &map->header) && (entry->start < end)) {
3463 			if (entry->wired_count == 0 ||
3464 			    (entry->end < end &&
3465 			     (entry->next == &map->header ||
3466 			      entry->next->start > entry->end))) {
3467 				if ((lockflags & UVM_LK_EXIT) == 0)
3468 					vm_map_unlock(map);
3469 				UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3470 				return EINVAL;
3471 			}
3472 			entry = entry->next;
3473 		}
3474 
3475 		/*
3476 		 * POSIX 1003.1b - a single munlock call unlocks a region,
3477 		 * regardless of the number of mlock calls made on that
3478 		 * region.
3479 		 */
3480 
3481 		entry = start_entry;
3482 		while ((entry != &map->header) && (entry->start < end)) {
3483 			UVM_MAP_CLIP_END(map, entry, end);
3484 			if (VM_MAPENT_ISWIRED(entry))
3485 				uvm_map_entry_unwire(map, entry);
3486 			entry = entry->next;
3487 		}
3488 		if ((lockflags & UVM_LK_EXIT) == 0)
3489 			vm_map_unlock(map);
3490 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3491 		return 0;
3492 	}
3493 
3494 	/*
3495 	 * wire case: in two passes [XXXCDC: ugly block of code here]
3496 	 *
3497 	 * 1: holding the write lock, we create any anonymous maps that need
3498 	 *    to be created.  then we clip each map entry to the region to
3499 	 *    be wired and increment its wiring count.
3500 	 *
3501 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3502 	 *    in the pages for any newly wired area (wired_count == 1).
3503 	 *
3504 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3505 	 *    deadlock with another thread that may have faulted on one of
3506 	 *    the pages to be wired (it would mark the page busy, blocking
3507 	 *    us, then in turn block on the map lock that we hold).  because
3508 	 *    of problems in the recursive lock package, we cannot upgrade
3509 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3510 	 *    require the write lock must be done beforehand.  because we
3511 	 *    keep the read lock on the map, the copy-on-write status of the
3512 	 *    entries we modify here cannot change.
3513 	 */
3514 
3515 	while ((entry != &map->header) && (entry->start < end)) {
3516 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3517 
3518 			/*
3519 			 * perform actions of vm_map_lookup that need the
3520 			 * write lock on the map: create an anonymous map
3521 			 * for a copy-on-write region, or an anonymous map
3522 			 * for a zero-fill region.  (XXXCDC: submap case
3523 			 * ok?)
3524 			 */
3525 
3526 			if (!UVM_ET_ISSUBMAP(entry)) {  /* not submap */
3527 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3528 				    ((entry->max_protection & VM_PROT_WRITE) ||
3529 				     (entry->object.uvm_obj == NULL))) {
3530 					amap_copy(map, entry, 0, start, end);
3531 					/* XXXCDC: wait OK? */
3532 				}
3533 			}
3534 		}
3535 		UVM_MAP_CLIP_START(map, entry, start);
3536 		UVM_MAP_CLIP_END(map, entry, end);
3537 		entry->wired_count++;
3538 
3539 		/*
3540 		 * Check for holes
3541 		 */
3542 
3543 		if (entry->protection == VM_PROT_NONE ||
3544 		    (entry->end < end &&
3545 		     (entry->next == &map->header ||
3546 		      entry->next->start > entry->end))) {
3547 
3548 			/*
3549 			 * found one.  amap creation actions do not need to
3550 			 * be undone, but the wired counts need to be restored.
3551 			 */
3552 
3553 			while (entry != &map->header && entry->end > start) {
3554 				entry->wired_count--;
3555 				entry = entry->prev;
3556 			}
3557 			if ((lockflags & UVM_LK_EXIT) == 0)
3558 				vm_map_unlock(map);
3559 			UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3560 			return EINVAL;
3561 		}
3562 		entry = entry->next;
3563 	}
3564 
3565 	/*
3566 	 * Pass 2.
3567 	 */
3568 
3569 #ifdef DIAGNOSTIC
3570 	timestamp_save = map->timestamp;
3571 #endif
3572 	vm_map_busy(map);
3573 	vm_map_unlock(map);
3574 
3575 	rv = 0;
3576 	entry = start_entry;
3577 	while (entry != &map->header && entry->start < end) {
3578 		if (entry->wired_count == 1) {
3579 			rv = uvm_fault_wire(map, entry->start, entry->end,
3580 			    entry->max_protection, 1);
3581 			if (rv) {
3582 
3583 				/*
3584 				 * wiring failed.  break out of the loop.
3585 				 * we'll clean up the map below, once we
3586 				 * have a write lock again.
3587 				 */
3588 
3589 				break;
3590 			}
3591 		}
3592 		entry = entry->next;
3593 	}
3594 
3595 	if (rv) {	/* failed? */
3596 
3597 		/*
3598 		 * Get back to an exclusive (write) lock.
3599 		 */
3600 
3601 		vm_map_lock(map);
3602 		vm_map_unbusy(map);
3603 
3604 #ifdef DIAGNOSTIC
3605 		if (timestamp_save + 1 != map->timestamp)
3606 			panic("uvm_map_pageable: stale map");
3607 #endif
3608 
3609 		/*
3610 		 * first drop the wiring count on all the entries
3611 		 * which haven't actually been wired yet.
3612 		 */
3613 
3614 		failed_entry = entry;
3615 		while (entry != &map->header && entry->start < end) {
3616 			entry->wired_count--;
3617 			entry = entry->next;
3618 		}
3619 
3620 		/*
3621 		 * now, unwire all the entries that were successfully
3622 		 * wired above.
3623 		 */
3624 
3625 		entry = start_entry;
3626 		while (entry != failed_entry) {
3627 			entry->wired_count--;
3628 			if (VM_MAPENT_ISWIRED(entry) == 0)
3629 				uvm_map_entry_unwire(map, entry);
3630 			entry = entry->next;
3631 		}
3632 		if ((lockflags & UVM_LK_EXIT) == 0)
3633 			vm_map_unlock(map);
3634 		UVMHIST_LOG(maphist, "<- done (RV=%jd)", rv,0,0,0);
3635 		return (rv);
3636 	}
3637 
3638 	if ((lockflags & UVM_LK_EXIT) == 0) {
3639 		vm_map_unbusy(map);
3640 	} else {
3641 
3642 		/*
3643 		 * Get back to an exclusive (write) lock.
3644 		 */
3645 
3646 		vm_map_lock(map);
3647 		vm_map_unbusy(map);
3648 	}
3649 
3650 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3651 	return 0;
3652 }
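
/*
 * Usage sketch (illustrative only, not compiled): a wire request in the
 * style of sys_mlock() passes new_pageable == false, assuming the
 * uvm_map_pageable() prototype (map, start, end, new_pageable,
 * lockflags); aligning the range and handling the error are the
 * caller's job:
 *
 *	vaddr_t start = trunc_page(addr);
 *	vaddr_t end = round_page(addr + size);
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end,
 *	    false, 0);
 */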
3653 
3654 /*
3655  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3656  * all mapped regions.
3657  *
3658  * => map must not be locked.
3659  * => if no flags are specified, all regions are unwired.
3660  * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3661  */
3662 
3663 int
3664 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3665 {
3666 	struct vm_map_entry *entry, *failed_entry;
3667 	vsize_t size;
3668 	int rv;
3669 #ifdef DIAGNOSTIC
3670 	u_int timestamp_save;
3671 #endif
3672 	UVMHIST_FUNC(__func__);
3673 	UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
3674 	    0, 0);
3675 
3676 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3677 
3678 	vm_map_lock(map);
3679 
3680 	/*
3681 	 * handle wiring and unwiring separately.
3682 	 */
3683 
3684 	if (flags == 0) {			/* unwire */
3685 
3686 		/*
3687 		 * POSIX 1003.1b -- munlockall unlocks all regions,
3688 		 * regardless of how many times mlockall has been called.
3689 		 */
3690 
3691 		for (entry = map->header.next; entry != &map->header;
3692 		     entry = entry->next) {
3693 			if (VM_MAPENT_ISWIRED(entry))
3694 				uvm_map_entry_unwire(map, entry);
3695 		}
3696 		map->flags &= ~VM_MAP_WIREFUTURE;
3697 		vm_map_unlock(map);
3698 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3699 		return 0;
3700 	}
3701 
3702 	if (flags & MCL_FUTURE) {
3703 
3704 		/*
3705 		 * must wire all future mappings; remember this.
3706 		 */
3707 
3708 		map->flags |= VM_MAP_WIREFUTURE;
3709 	}
3710 
3711 	if ((flags & MCL_CURRENT) == 0) {
3712 
3713 		/*
3714 		 * no more work to do!
3715 		 */
3716 
3717 		UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3718 		vm_map_unlock(map);
3719 		return 0;
3720 	}
3721 
3722 	/*
3723 	 * wire case: in three passes [XXXCDC: ugly block of code here]
3724 	 *
3725 	 * 1: holding the write lock, count all pages mapped by non-wired
3726 	 *    entries.  if this would cause us to go over our limit, we fail.
3727 	 *
3728 	 * 2: still holding the write lock, we create any anonymous maps that
3729 	 *    need to be created.  then we increment each entry's wiring count.
3730 	 *
3731 	 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3732 	 *    in the pages for any newly wired area (wired_count == 1).
3733 	 *
3734 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3735 	 *    deadlock with another thread that may have faulted on one of
3736 	 *    the pages to be wired (it would mark the page busy, blocking
3737 	 *    us, then in turn block on the map lock that we hold).  because
3738 	 *    of problems in the recursive lock package, we cannot upgrade
3739 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3740 	 *    require the write lock must be done beforehand.  because we
3741 	 *    keep the read lock on the map, the copy-on-write status of the
3742 	 *    entries we modify here cannot change.
3743 	 */
3744 
3745 	for (size = 0, entry = map->header.next; entry != &map->header;
3746 	     entry = entry->next) {
3747 		if (entry->protection != VM_PROT_NONE &&
3748 		    VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3749 			size += entry->end - entry->start;
3750 		}
3751 	}
3752 
3753 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3754 		vm_map_unlock(map);
3755 		return ENOMEM;
3756 	}
3757 
3758 	if (limit != 0 &&
3759 	    (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3760 		vm_map_unlock(map);
3761 		return ENOMEM;
3762 	}
3763 
3764 	/*
3765 	 * Pass 2.
3766 	 */
3767 
3768 	for (entry = map->header.next; entry != &map->header;
3769 	     entry = entry->next) {
3770 		if (entry->protection == VM_PROT_NONE)
3771 			continue;
3772 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3773 
3774 			/*
3775 			 * perform actions of vm_map_lookup that need the
3776 			 * write lock on the map: create an anonymous map
3777 			 * for a copy-on-write region, or an anonymous map
3778 			 * for a zero-fill region.  (XXXCDC: submap case
3779 			 * ok?)
3780 			 */
3781 
3782 			if (!UVM_ET_ISSUBMAP(entry)) {	/* not submap */
3783 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3784 				    ((entry->max_protection & VM_PROT_WRITE) ||
3785 				     (entry->object.uvm_obj == NULL))) {
3786 					amap_copy(map, entry, 0, entry->start,
3787 					    entry->end);
3788 					/* XXXCDC: wait OK? */
3789 				}
3790 			}
3791 		}
3792 		entry->wired_count++;
3793 	}
3794 
3795 	/*
3796 	 * Pass 3.
3797 	 */
3798 
3799 #ifdef DIAGNOSTIC
3800 	timestamp_save = map->timestamp;
3801 #endif
3802 	vm_map_busy(map);
3803 	vm_map_unlock(map);
3804 
3805 	rv = 0;
3806 	for (entry = map->header.next; entry != &map->header;
3807 	     entry = entry->next) {
3808 		if (entry->wired_count == 1) {
3809 			rv = uvm_fault_wire(map, entry->start, entry->end,
3810 			    entry->max_protection, 1);
3811 			if (rv) {
3812 
3813 				/*
3814 				 * wiring failed.  break out of the loop.
3815 				 * we'll clean up the map below, once we
3816 				 * have a write lock again.
3817 				 */
3818 
3819 				break;
3820 			}
3821 		}
3822 	}
3823 
3824 	if (rv) {
3825 
3826 		/*
3827 		 * Get back to an exclusive (write) lock.
3828 		 */
3829 
3830 		vm_map_lock(map);
3831 		vm_map_unbusy(map);
3832 
3833 #ifdef DIAGNOSTIC
3834 		if (timestamp_save + 1 != map->timestamp)
3835 			panic("uvm_map_pageable_all: stale map");
3836 #endif
3837 
3838 		/*
3839 		 * first drop the wiring count on all the entries
3840 		 * which haven't actually been wired yet.
3841 		 *
3842 		 * Skip VM_PROT_NONE entries like we did above.
3843 		 */
3844 
3845 		failed_entry = entry;
3846 		for (/* nothing */; entry != &map->header;
3847 		     entry = entry->next) {
3848 			if (entry->protection == VM_PROT_NONE)
3849 				continue;
3850 			entry->wired_count--;
3851 		}
3852 
3853 		/*
3854 		 * now, unwire all the entries that were successfully
3855 		 * wired above.
3856 		 *
3857 		 * Skip VM_PROT_NONE entries like we did above.
3858 		 */
3859 
3860 		for (entry = map->header.next; entry != failed_entry;
3861 		     entry = entry->next) {
3862 			if (entry->protection == VM_PROT_NONE)
3863 				continue;
3864 			entry->wired_count--;
3865 			if (VM_MAPENT_ISWIRED(entry))
3866 				uvm_map_entry_unwire(map, entry);
3867 		}
3868 		vm_map_unlock(map);
3869 		UVMHIST_LOG(maphist,"<- done (RV=%jd)", rv,0,0,0);
3870 		return (rv);
3871 	}
3872 
3873 	vm_map_unbusy(map);
3874 
3875 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3876 	return 0;
3877 }
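
/*
 * Usage sketch (illustrative only, not compiled): sys_mlockall() reaches
 * this function with the MCL_* flags from <sys/mman.h> and the caller's
 * RLIMIT_MEMLOCK limit; the exact call in the syscall path may differ
 * in detail:
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 */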
3878 
3879 /*
3880  * uvm_map_clean: clean out a map range
3881  *
3882  * => valid flags:
3883  *   if (flags & PGO_CLEANIT): dirty pages are cleaned first
3884  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
3885  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3886  *   if (flags & PGO_FREE): any cached pages are freed after clean
3887  * => returns an error if any part of the specified range isn't mapped
3888  * => never a need to flush amap layer since the anonymous memory has
3889  *	no permanent home, but may deactivate pages there
3890  * => called from sys_msync() and sys_madvise()
3891  * => caller must not have map locked
3892  */
3893 
3894 int
3895 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3896 {
3897 	struct vm_map_entry *current, *entry;
3898 	struct uvm_object *uobj;
3899 	struct vm_amap *amap;
3900 	struct vm_anon *anon;
3901 	struct vm_page *pg;
3902 	vaddr_t offset;
3903 	vsize_t size;
3904 	voff_t uoff;
3905 	int error, refs;
3906 	UVMHIST_FUNC(__func__);
3907 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
3908 	    (uintptr_t)map, start, end, flags);
3909 
3910 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3911 		(PGO_FREE|PGO_DEACTIVATE));
3912 
3913 	vm_map_lock(map);
3914 	VM_MAP_RANGE_CHECK(map, start, end);
3915 	if (!uvm_map_lookup_entry(map, start, &entry)) {
3916 		vm_map_unlock(map);
3917 		return EFAULT;
3918 	}
3919 
3920 	/*
3921 	 * Make a first pass to check for holes and wiring problems.
3922 	 */
3923 
3924 	for (current = entry; current->start < end; current = current->next) {
3925 		if (UVM_ET_ISSUBMAP(current)) {
3926 			vm_map_unlock(map);
3927 			return EINVAL;
3928 		}
3929 		if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
3930 			vm_map_unlock(map);
3931 			return EBUSY;
3932 		}
3933 		if (end <= current->end) {
3934 			break;
3935 		}
3936 		if (current->end != current->next->start) {
3937 			vm_map_unlock(map);
3938 			return EFAULT;
3939 		}
3940 	}
3941 
3942 	vm_map_busy(map);
3943 	vm_map_unlock(map);
3944 	error = 0;
3945 	for (current = entry; start < end; current = current->next) {
3946 		amap = current->aref.ar_amap;	/* upper layer */
3947 		uobj = current->object.uvm_obj;	/* lower layer */
3948 		KASSERT(start >= current->start);
3949 
3950 		/*
3951 		 * No amap cleaning necessary if:
3952 		 *
3953 		 *	(1) There's no amap.
3954 		 *
3955 		 *	(2) We're not deactivating or freeing pages.
3956 		 */
3957 
3958 		if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3959 			goto flush_object;
3960 
3961 		offset = start - current->start;
3962 		size = MIN(end, current->end) - start;
3963 
3964 		amap_lock(amap, RW_WRITER);
3965 		for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3966 			anon = amap_lookup(&current->aref, offset);
3967 			if (anon == NULL)
3968 				continue;
3969 
3970 			KASSERT(anon->an_lock == amap->am_lock);
3971 			pg = anon->an_page;
3972 			if (pg == NULL) {
3973 				continue;
3974 			}
3975 			if (pg->flags & PG_BUSY) {
3976 				continue;
3977 			}
3978 
3979 			switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3980 
3981 			/*
3982 			 * In these first 3 cases, we just deactivate the page.
3983 			 */
3984 
3985 			case PGO_CLEANIT|PGO_FREE:
3986 			case PGO_CLEANIT|PGO_DEACTIVATE:
3987 			case PGO_DEACTIVATE:
3988  deactivate_it:
3989 				/*
3990 				 * skip the page if it's loaned or wired,
3991 				 * since it shouldn't be on a paging queue
3992 				 * at all in these cases.
3993 				 */
3994 
3995 				if (pg->loan_count != 0 ||
3996 				    pg->wire_count != 0) {
3997 					continue;
3998 				}
3999 				KASSERT(pg->uanon == anon);
4000 				uvm_pagelock(pg);
4001 				uvm_pagedeactivate(pg);
4002 				uvm_pageunlock(pg);
4003 				continue;
4004 
4005 			case PGO_FREE:
4006 
4007 				/*
4008 				 * If there are multiple references to
4009 				 * the amap, just deactivate the page.
4010 				 */
4011 
4012 				if (amap_refs(amap) > 1)
4013 					goto deactivate_it;
4014 
4015 				/* skip the page if it's wired */
4016 				if (pg->wire_count != 0) {
4017 					continue;
4018 				}
4019 				amap_unadd(&current->aref, offset);
4020 				refs = --anon->an_ref;
4021 				if (refs == 0) {
4022 					uvm_anfree(anon);
4023 				}
4024 				continue;
4025 			}
4026 		}
4027 		amap_unlock(amap);
4028 
4029  flush_object:
4030 		/*
4031 		 * flush pages if we've got a valid backing object.
4032 		 * note that we must always clean object pages before
4033 		 * freeing them since otherwise we could reveal stale
4034 		 * data from files.
4035 		 */
4036 
4037 		uoff = current->offset + (start - current->start);
4038 		size = MIN(end, current->end) - start;
4039 		if (uobj != NULL) {
4040 			rw_enter(uobj->vmobjlock, RW_WRITER);
4041 			if (uobj->pgops->pgo_put != NULL)
4042 				error = (uobj->pgops->pgo_put)(uobj, uoff,
4043 				    uoff + size, flags | PGO_CLEANIT);
4044 			else
4045 				error = 0;
4046 		}
4047 		start += size;
4048 	}
4049 	vm_map_unbusy(map);
4050 	return error;
4051 }
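
/*
 * Usage sketch (illustrative only, not compiled): an msync(2)-style
 * caller translates its MS_* flags into the PGO_* flags accepted above;
 * the precise mapping in sys___msync13() may differ in detail, but the
 * general shape is:
 *
 *	int uvmflags = PGO_CLEANIT;
 *	if (ms_flags & MS_INVALIDATE)
 *		uvmflags |= PGO_DEACTIVATE;
 *	if (ms_flags & MS_SYNC)
 *		uvmflags |= PGO_SYNCIO;
 *	error = uvm_map_clean(map, start, end, uvmflags);
 */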
4052 
4053 
4054 /*
4055  * uvm_map_checkprot: check protection in map
4056  *
4057  * => must allow specified protection in a fully allocated region.
4058  * => map must be read or write locked by caller.
4059  */
4060 
4061 bool
4062 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
4063     vm_prot_t protection)
4064 {
4065 	struct vm_map_entry *entry;
4066 	struct vm_map_entry *tmp_entry;
4067 
4068 	if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
4069 		return (false);
4070 	}
4071 	entry = tmp_entry;
4072 	while (start < end) {
4073 		if (entry == &map->header) {
4074 			return (false);
4075 		}
4076 
4077 		/*
4078 		 * no holes allowed
4079 		 */
4080 
4081 		if (start < entry->start) {
4082 			return (false);
4083 		}
4084 
4085 		/*
4086 		 * check protection associated with entry
4087 		 */
4088 
4089 		if ((entry->protection & protection) != protection) {
4090 			return (false);
4091 		}
4092 		start = entry->end;
4093 		entry = entry->next;
4094 	}
4095 	return (true);
4096 }
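
/*
 * Usage sketch (illustrative only, not compiled): per the header
 * comment the caller must already hold the map read or write locked,
 * so checking that a range is both readable and writable looks like:
 *
 *	vm_map_lock_read(map);
 *	ok = uvm_map_checkprot(map, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	vm_map_unlock_read(map);
 */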
4097 
4098 /*
4099  * uvmspace_alloc: allocate a vmspace structure.
4100  *
4101  * - structure includes vm_map and pmap
4102  * - XXX: no locking on this structure
4103  * - refcnt set to 1, rest must be init'd by caller
4104  */
4105 struct vmspace *
4106 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
4107 {
4108 	struct vmspace *vm;
4109 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4110 
4111 	vm = kmem_alloc(sizeof(*vm), KM_SLEEP);
4112 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
4113 	UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
4114 	return (vm);
4115 }
4116 
4117 /*
4118  * uvmspace_init: initialize a vmspace structure.
4119  *
4120  * - XXX: no locking on this structure
4121  * - refcnt set to 1, rest must be init'd by caller
4122  */
4123 void
4124 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
4125     vaddr_t vmax, bool topdown)
4126 {
4127 	UVMHIST_FUNC(__func__);
4128 	UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
4129 	    (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax);
4130 	UVMHIST_LOG(maphist, "   topdown=%ju)", topdown, 0, 0, 0);
4131 
4132 	memset(vm, 0, sizeof(*vm));
4133 	uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4134 	    | (topdown ? VM_MAP_TOPDOWN : 0)
4135 	    );
4136 	if (pmap)
4137 		pmap_reference(pmap);
4138 	else
4139 		pmap = pmap_create();
4140 	vm->vm_map.pmap = pmap;
4141 	vm->vm_refcnt = 1;
4142 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4143 }
4144 
4145 /*
4146  * uvmspace_share: share a vmspace between two processes
4147  *
4148  * - used for vfork, threads(?)
4149  */
4150 
4151 void
4152 uvmspace_share(struct proc *p1, struct proc *p2)
4153 {
4154 
4155 	uvmspace_addref(p1->p_vmspace);
4156 	p2->p_vmspace = p1->p_vmspace;
4157 }
4158 
4159 #if 0
4160 
4161 /*
4162  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4163  *
4164  * - XXX: no locking on vmspace
4165  */
4166 
4167 void
4168 uvmspace_unshare(struct lwp *l)
4169 {
4170 	struct proc *p = l->l_proc;
4171 	struct vmspace *nvm, *ovm = p->p_vmspace;
4172 
4173 	if (ovm->vm_refcnt == 1)
4174 		/* nothing to do: vmspace isn't shared in the first place */
4175 		return;
4176 
4177 	/* make a new vmspace, still holding old one */
4178 	nvm = uvmspace_fork(ovm);
4179 
4180 	kpreempt_disable();
4181 	pmap_deactivate(l);		/* unbind old vmspace */
4182 	p->p_vmspace = nvm;
4183 	pmap_activate(l);		/* switch to new vmspace */
4184 	kpreempt_enable();
4185 
4186 	uvmspace_free(ovm);		/* drop reference to old vmspace */
4187 }
4188 
4189 #endif
4190 
4191 
4192 /*
4193  * uvmspace_spawn: a new process has been spawned and needs a vmspace
4194  */
4195 
4196 void
4197 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4198 {
4199 	struct proc *p = l->l_proc;
4200 	struct vmspace *nvm;
4201 
4202 #ifdef __HAVE_CPU_VMSPACE_EXEC
4203 	cpu_vmspace_exec(l, start, end);
4204 #endif
4205 
4206 	nvm = uvmspace_alloc(start, end, topdown);
4207 	kpreempt_disable();
4208 	p->p_vmspace = nvm;
4209 	pmap_activate(l);
4210 	kpreempt_enable();
4211 }
4212 
4213 /*
4214  * uvmspace_exec: the process wants to exec a new program
4215  */
4216 
4217 void
4218 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4219 {
4220 	struct proc *p = l->l_proc;
4221 	struct vmspace *nvm, *ovm = p->p_vmspace;
4222 	struct vm_map *map;
4223 	int flags;
4224 
4225 	KASSERT(ovm != NULL);
4226 #ifdef __HAVE_CPU_VMSPACE_EXEC
4227 	cpu_vmspace_exec(l, start, end);
4228 #endif
4229 
4230 	map = &ovm->vm_map;
4231 	/*
4232 	 * see if more than one process is using this vmspace...
4233 	 */
4234 
4235 	if (ovm->vm_refcnt == 1
4236 	    && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
4237 
4238 		/*
4239 		 * if p is the only process using its vmspace then we can safely
4240 		 * recycle that vmspace for the program that is being exec'd.
4241 		 * But only if TOPDOWN matches the requested value for the new
4242 		 * vm space!
4243 		 */
4244 
4245 		/*
4246 		 * SYSV SHM semantics require us to kill all segments on an exec
4247 		 */
4248 		if (uvm_shmexit && ovm->vm_shm)
4249 			(*uvm_shmexit)(ovm);
4250 
4251 		/*
4252 		 * POSIX 1003.1b -- "lock future mappings" is revoked
4253 		 * when a process execs another program image.
4254 		 */
4255 
4256 		map->flags &= ~VM_MAP_WIREFUTURE;
4257 
4258 		/*
4259 		 * now unmap the old program.
4260 		 *
4261 		 * XXX set VM_MAP_DYING for the duration, so pmap_update()
4262 		 * is not called until the pmap has been totally cleared out
4263 		 * after pmap_remove_all(), or it can confuse some pmap
4264 		 * implementations.  it would be nice to handle this by
4265 		 * deferring the pmap_update() while it is known the address
4266 		 * space is not visible to any user LWP other than curlwp,
4267 		 * but there isn't an elegant way of inferring that right
4268 		 * now.
4269 		 */
4270 
4271 		flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4272 		map->flags |= VM_MAP_DYING;
4273 		uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
4274 		map->flags &= ~VM_MAP_DYING;
4275 		pmap_update(map->pmap);
4276 		KASSERT(map->header.prev == &map->header);
4277 		KASSERT(map->nentries == 0);
4278 
4279 		/*
4280 		 * resize the map
4281 		 */
4282 
4283 		vm_map_setmin(map, start);
4284 		vm_map_setmax(map, end);
4285 	} else {
4286 
4287 		/*
4288 		 * p's vmspace is being shared, so we can't reuse it for p since
4289 		 * it is still being used for others.   allocate a new vmspace
4290 		 * for p
4291 		 */
4292 
4293 		nvm = uvmspace_alloc(start, end, topdown);
4294 
4295 		/*
4296 		 * install new vmspace and drop our ref to the old one.
4297 		 */
4298 
4299 		kpreempt_disable();
4300 		pmap_deactivate(l);
4301 		p->p_vmspace = nvm;
4302 		pmap_activate(l);
4303 		kpreempt_enable();
4304 
4305 		uvmspace_free(ovm);
4306 	}
4307 }
4308 
4309 /*
4310  * uvmspace_addref: add a reference to a vmspace.
4311  */
4312 
4313 void
4314 uvmspace_addref(struct vmspace *vm)
4315 {
4316 
4317 	KASSERT((vm->vm_map.flags & VM_MAP_DYING) == 0);
4318 	KASSERT(vm->vm_refcnt > 0);
4319 	atomic_inc_uint(&vm->vm_refcnt);
4320 }
4321 
4322 /*
4323  * uvmspace_free: free a vmspace data structure
4324  */
4325 
4326 void
4327 uvmspace_free(struct vmspace *vm)
4328 {
4329 	struct vm_map_entry *dead_entries;
4330 	struct vm_map *map = &vm->vm_map;
4331 	int flags;
4332 
4333 	UVMHIST_FUNC(__func__);
4334 	UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm,
4335 	    vm->vm_refcnt, 0, 0);
4336 
4337 	membar_release();
4338 	if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0)
4339 		return;
4340 	membar_acquire();
4341 
4342 	/*
4343 	 * at this point, there should be no other references to the map.
4344 	 * delete all of the mappings, then destroy the pmap.
4345 	 */
4346 
4347 	map->flags |= VM_MAP_DYING;
4348 	flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4349 
4350 	/* Get rid of any SYSV shared memory segments. */
4351 	if (uvm_shmexit && vm->vm_shm != NULL)
4352 		(*uvm_shmexit)(vm);
4353 
4354 	if (map->nentries) {
4355 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4356 		    &dead_entries, flags);
4357 		if (dead_entries != NULL)
4358 			uvm_unmap_detach(dead_entries, 0);
4359 	}
4360 	KASSERT(map->nentries == 0);
4361 	KASSERT(map->size == 0);
4362 
4363 	mutex_destroy(&map->misc_lock);
4364 	rw_destroy(&map->lock);
4365 	cv_destroy(&map->cv);
4366 	pmap_destroy(map->pmap);
4367 	kmem_free(vm, sizeof(*vm));
4368 }
4369 
4370 static struct vm_map_entry *
4371 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4372     int flags)
4373 {
4374 	struct vm_map_entry *new_entry;
4375 
4376 	new_entry = uvm_mapent_alloc(new_map, 0);
4377 	/* old_entry -> new_entry */
4378 	uvm_mapent_copy(old_entry, new_entry);
4379 
4380 	/* new pmap has nothing wired in it */
4381 	new_entry->wired_count = 0;
4382 
4383 	/*
4384 	 * gain reference to object backing the map (can't
4385 	 * be a submap, already checked this case).
4386 	 */
4387 
4388 	if (new_entry->aref.ar_amap)
4389 		uvm_map_reference_amap(new_entry, flags);
4390 
4391 	if (new_entry->object.uvm_obj &&
4392 	    new_entry->object.uvm_obj->pgops->pgo_reference)
4393 		new_entry->object.uvm_obj->pgops->pgo_reference(
4394 			new_entry->object.uvm_obj);
4395 
4396 	/* insert entry at end of new_map's entry list */
4397 	uvm_map_entry_link(new_map, new_map->header.prev,
4398 	    new_entry);
4399 
4400 	return new_entry;
4401 }
4402 
4403 /*
4404  * share the mapping: this means we want the old and
4405  * new entries to share amaps and backing objects.
4406  */
4407 static void
4408 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
4409     struct vm_map_entry *old_entry)
4410 {
4411 	/*
4412 	 * if the old_entry needs a new amap (due to prev fork)
4413 	 * then we need to allocate it now so that we have
4414 	 * something we own to share with the new_entry.   [in
4415 	 * other words, we need to clear needs_copy]
4416 	 */
4417 
4418 	if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4419 		/* get our own amap, clears needs_copy */
4420 		amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4421 		    0, 0);
4422 		/* XXXCDC: WAITOK??? */
4423 	}
4424 
4425 	uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
4426 }
4427 
4428 
4429 static void
4430 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
4431     struct vm_map_entry *old_entry)
4432 {
4433 	struct vm_map_entry *new_entry;
4434 
4435 	/*
4436 	 * copy-on-write the mapping (using mmap's
4437 	 * MAP_PRIVATE semantics)
4438 	 *
4439 	 * allocate new_entry, adjust reference counts.
4440 	 * (note that new references are read-only).
4441 	 */
4442 
4443 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4444 
4445 	new_entry->etype |=
4446 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4447 
4448 	/*
4449 	 * the new entry will need an amap.  it will either
4450 	 * need to be copied from the old entry or created
4451 	 * from scratch (if the old entry does not have an
4452 	 * amap).  can we defer this process until later
4453 	 * (by setting "needs_copy") or do we need to copy
4454 	 * the amap now?
4455 	 *
4456 	 * we must copy the amap now if any of the following
4457 	 * conditions hold:
4458 	 * 1. the old entry has an amap and that amap is
4459 	 *    being shared.  this means that the old (parent)
4460 	 *    process is sharing the amap with another
4461 	 *    process.  if we do not clear needs_copy here
4462 	 *    we will end up in a situation where both the
4463 	 *    parent and child process are referring to the
4464 	 *    same amap with "needs_copy" set.  if the
4465 	 *    parent write-faults, the fault routine will
4466 	 *    clear "needs_copy" in the parent by allocating
4467 	 *    a new amap.   this is wrong because the
4468 	 *    parent is supposed to be sharing the old amap
4469 	 *    and the new amap will break that.
4470 	 *
4471 	 * 2. if the old entry has an amap and a non-zero
4472 	 *    wire count then we are going to have to call
4473 	 *    amap_cow_now to avoid page faults in the
4474 	 *    parent process.   since amap_cow_now requires
4475 	 *    "needs_copy" to be clear we might as well
4476 	 *    clear it here as well.
4477 	 *
4478 	 */
4479 
4480 	if (old_entry->aref.ar_amap != NULL) {
4481 		if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4482 		    VM_MAPENT_ISWIRED(old_entry)) {
4483 
4484 			amap_copy(new_map, new_entry,
4485 			    AMAP_COPY_NOCHUNK, 0, 0);
4486 			/* XXXCDC: M_WAITOK ... ok? */
4487 		}
4488 	}
4489 
4490 	/*
4491 	 * if the parent's entry is wired down, then the
4492 	 * parent process does not want page faults on
4493 	 * access to that memory.  this means that we
4494 	 * cannot do copy-on-write because we can't write
4495 	 * protect the old entry.   in this case we
4496 	 * resolve all copy-on-write faults now, using
4497 	 * amap_cow_now.   note that we have already
4498 	 * allocated any needed amap (above).
4499 	 */
4500 
4501 	if (VM_MAPENT_ISWIRED(old_entry)) {
4502 
4503 		/*
4504 		 * resolve all copy-on-write faults now
4505 		 * (note that there is nothing to do if
4506 		 * the old mapping does not have an amap).
4507 		 */
4508 		if (old_entry->aref.ar_amap)
4509 			amap_cow_now(new_map, new_entry);
4510 
4511 	} else {
4512 		/*
4513 		 * setup mappings to trigger copy-on-write faults:
4514 		 * we must write-protect the parent if it has
4515 		 * an amap and it is not already "needs_copy"...
4516 		 * if it is already "needs_copy" then the parent
4517 		 * has already been write-protected by a previous
4518 		 * fork operation.
4519 		 */
4520 		if (old_entry->aref.ar_amap &&
4521 		    !UVM_ET_ISNEEDSCOPY(old_entry)) {
4522 			if (old_entry->max_protection & VM_PROT_WRITE) {
4523 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
4524 				uvm_map_lock_entry(old_entry, RW_WRITER);
4525 #else
4526 				uvm_map_lock_entry(old_entry, RW_READER);
4527 #endif
4528 				pmap_protect(old_map->pmap,
4529 				    old_entry->start, old_entry->end,
4530 				    old_entry->protection & ~VM_PROT_WRITE);
4531 				uvm_map_unlock_entry(old_entry);
4532 			}
4533 			old_entry->etype |= UVM_ET_NEEDSCOPY;
4534 		}
4535 	}
4536 }
4537 
4538 /*
4539  * zero the mapping: the new entry will be zero initialized
4540  */
4541 static void
4542 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
4543     struct vm_map_entry *old_entry)
4544 {
4545 	struct vm_map_entry *new_entry;
4546 
4547 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4548 
4549 	new_entry->etype |=
4550 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4551 
4552 	if (new_entry->aref.ar_amap) {
4553 		uvm_map_unreference_amap(new_entry, 0);
4554 		new_entry->aref.ar_pageoff = 0;
4555 		new_entry->aref.ar_amap = NULL;
4556 	}
4557 
4558 	if (UVM_ET_ISOBJ(new_entry)) {
4559 		if (new_entry->object.uvm_obj->pgops->pgo_detach)
4560 			new_entry->object.uvm_obj->pgops->pgo_detach(
4561 			    new_entry->object.uvm_obj);
4562 		new_entry->object.uvm_obj = NULL;
4563 		new_entry->offset = 0;
4564 		new_entry->etype &= ~UVM_ET_OBJ;
4565 	}
4566 }
4567 
4568 /*
4569  *   F O R K   -   m a i n   e n t r y   p o i n t
4570  */
4571 /*
4572  * uvmspace_fork: fork a process' main map
4573  *
4574  * => create a new vmspace for child process from parent.
4575  * => parent's map must not be locked.
4576  */
4577 
4578 struct vmspace *
4579 uvmspace_fork(struct vmspace *vm1)
4580 {
4581 	struct vmspace *vm2;
4582 	struct vm_map *old_map = &vm1->vm_map;
4583 	struct vm_map *new_map;
4584 	struct vm_map_entry *old_entry;
4585 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4586 
4587 	vm_map_lock(old_map);
4588 
4589 	vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4590 	    vm1->vm_map.flags & VM_MAP_TOPDOWN);
4591 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4592 	    (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4593 	new_map = &vm2->vm_map;		  /* XXX */
4594 
4595 	old_entry = old_map->header.next;
4596 	new_map->size = old_map->size;
4597 
4598 	/*
4599 	 * go entry-by-entry
4600 	 */
4601 
4602 	while (old_entry != &old_map->header) {
4603 
4604 		/*
4605 		 * first, some sanity checks on the old entry
4606 		 */
4607 
4608 		KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4609 		KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4610 			!UVM_ET_ISNEEDSCOPY(old_entry));
4611 
4612 		switch (old_entry->inheritance) {
4613 		case MAP_INHERIT_NONE:
4614 			/*
4615 			 * drop the mapping, modify size
4616 			 */
4617 			new_map->size -= old_entry->end - old_entry->start;
4618 			break;
4619 
4620 		case MAP_INHERIT_SHARE:
4621 			uvm_mapent_forkshared(new_map, old_map, old_entry);
4622 			break;
4623 
4624 		case MAP_INHERIT_COPY:
4625 			uvm_mapent_forkcopy(new_map, old_map, old_entry);
4626 			break;
4627 
4628 		case MAP_INHERIT_ZERO:
4629 			uvm_mapent_forkzero(new_map, old_map, old_entry);
4630 			break;
4631 		default:
4632 			KASSERT(0);
4633 			break;
4634 		}
4635 		old_entry = old_entry->next;
4636 	}
4637 
4638 	pmap_update(old_map->pmap);
4639 	vm_map_unlock(old_map);
4640 
4641 	if (uvm_shmfork && vm1->vm_shm)
4642 		(*uvm_shmfork)(vm1, vm2);
4643 
4644 #ifdef PMAP_FORK
4645 	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4646 #endif
4647 
4648 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4649 	return (vm2);
4650 }
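
/*
 * Usage sketch (illustrative only, not compiled): the process-fork
 * path (in the style of uvm_proc_fork()) either shares the parent's
 * vmspace for vfork or derives a copy-on-write clone with
 * uvmspace_fork():
 *
 *	if (shared)
 *		uvmspace_share(p1, p2);
 *	else
 *		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
 */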
4651 
4652 
4653 /*
4654  * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4655  *
4656  * => called with map locked.
4657  * => return non-zero if successfully merged.
4658  */
4659 
4660 int
4661 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4662 {
4663 	struct uvm_object *uobj;
4664 	struct vm_map_entry *next;
4665 	struct vm_map_entry *prev;
4666 	vsize_t size;
4667 	int merged = 0;
4668 	bool copying;
4669 	int newetype;
4670 
4671 	if (entry->aref.ar_amap != NULL) {
4672 		return 0;
4673 	}
4674 	if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4675 		return 0;
4676 	}
4677 
4678 	uobj = entry->object.uvm_obj;
4679 	size = entry->end - entry->start;
4680 	copying = (flags & UVM_MERGE_COPYING) != 0;
4681 	newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4682 
4683 	next = entry->next;
4684 	if (next != &map->header &&
4685 	    next->start == entry->end &&
4686 	    ((copying && next->aref.ar_amap != NULL &&
4687 	    amap_refs(next->aref.ar_amap) == 1) ||
4688 	    (!copying && next->aref.ar_amap == NULL)) &&
4689 	    UVM_ET_ISCOMPATIBLE(next, newetype,
4690 	    uobj, entry->flags, entry->protection,
4691 	    entry->max_protection, entry->inheritance, entry->advice,
4692 	    entry->wired_count) &&
4693 	    (uobj == NULL || entry->offset + size == next->offset)) {
4694 		int error;
4695 
4696 		if (copying) {
4697 			error = amap_extend(next, size,
4698 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4699 		} else {
4700 			error = 0;
4701 		}
4702 		if (error == 0) {
4703 			if (uobj) {
4704 				if (uobj->pgops->pgo_detach) {
4705 					uobj->pgops->pgo_detach(uobj);
4706 				}
4707 			}
4708 
4709 			entry->end = next->end;
4710 			clear_hints(map, next);
4711 			uvm_map_entry_unlink(map, next);
4712 			if (copying) {
4713 				entry->aref = next->aref;
4714 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4715 			}
4716 			uvm_map_check(map, "trymerge forwardmerge");
4717 			uvm_mapent_free(next);
4718 			merged++;
4719 		}
4720 	}
4721 
4722 	prev = entry->prev;
4723 	if (prev != &map->header &&
4724 	    prev->end == entry->start &&
4725 	    ((copying && !merged && prev->aref.ar_amap != NULL &&
4726 	    amap_refs(prev->aref.ar_amap) == 1) ||
4727 	    (!copying && prev->aref.ar_amap == NULL)) &&
4728 	    UVM_ET_ISCOMPATIBLE(prev, newetype,
4729 	    uobj, entry->flags, entry->protection,
4730 	    entry->max_protection, entry->inheritance, entry->advice,
4731 	    entry->wired_count) &&
4732 	    (uobj == NULL ||
4733 	    prev->offset + prev->end - prev->start == entry->offset)) {
4734 		int error;
4735 
4736 		if (copying) {
4737 			error = amap_extend(prev, size,
4738 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4739 		} else {
4740 			error = 0;
4741 		}
4742 		if (error == 0) {
4743 			if (uobj) {
4744 				if (uobj->pgops->pgo_detach) {
4745 					uobj->pgops->pgo_detach(uobj);
4746 				}
4747 				entry->offset = prev->offset;
4748 			}
4749 
4750 			entry->start = prev->start;
4751 			clear_hints(map, prev);
4752 			uvm_map_entry_unlink(map, prev);
4753 			if (copying) {
4754 				entry->aref = prev->aref;
4755 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4756 			}
4757 			uvm_map_check(map, "trymerge backmerge");
4758 			uvm_mapent_free(prev);
4759 			merged++;
4760 		}
4761 	}
4762 
4763 	return merged;
4764 }
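
/*
 * Worked example of the merge conditions above: entry itself must have
 * no amap (checked at entry to the function).  If entry [A,B) and its
 * successor [B,C) map the same uobj at contiguous offsets with
 * identical protection, inheritance, advice and wiring, and the
 * successor has no amap either, the forward branch drops one uobj
 * reference, extends entry->end to C, unlinks and frees the successor,
 * and the function returns non-zero.  With UVM_MERGE_COPYING set, the
 * neighbour must instead carry a single-reference amap that
 * amap_extend() can grow over the merged range.
 */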
4765 
4766 /*
4767  * uvm_map_setup: init map
4768  *
4769  * => map must not be in service yet.
4770  */
4771 
4772 void
4773 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4774 {
4775 
4776 	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4777 	map->header.next = map->header.prev = &map->header;
4778 	map->nentries = 0;
4779 	map->size = 0;
4780 	map->ref_count = 1;
4781 	vm_map_setmin(map, vmin);
4782 	vm_map_setmax(map, vmax);
4783 	map->flags = flags;
4784 	map->first_free = &map->header;
4785 	map->hint = &map->header;
4786 	map->timestamp = 0;
4787 	map->busy = NULL;
4788 
4789 	rw_init(&map->lock);
4790 	cv_init(&map->cv, "vm_map");
4791 	mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4792 }
4793 
4794 /*
4795  *   U N M A P   -   m a i n   e n t r y   p o i n t
4796  */
4797 
4798 /*
4799  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop")
4800  *
4801  * => caller must check alignment and size
4802  * => map must be unlocked (we will lock it)
4803  * => flags is UVM_FLAG_QUANTUM or 0.
4804  */
4805 
4806 void
4807 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4808 {
4809 	struct vm_map_entry *dead_entries;
4810 	UVMHIST_FUNC(__func__);
4811 	UVMHIST_CALLARGS(maphist, "  (map=%#jx, start=%#jx, end=%#jx)",
4812 	    (uintptr_t)map, start, end, 0);
4813 
4814 	KASSERTMSG(start < end,
4815 	    "%s: map %p: start %#jx < end %#jx", __func__, map,
4816 	    (uintmax_t)start, (uintmax_t)end);
4817 	if (map == kernel_map) {
4818 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4819 	}
4820 
4821 	/*
4822 	 * work now done by helper functions.   wipe the pmap and then
4823 	 * detach from the dead entries...
4824 	 */
4825 	vm_map_lock(map);
4826 	uvm_unmap_remove(map, start, end, &dead_entries, flags);
4827 	vm_map_unlock(map);
4828 
4829 	if (dead_entries != NULL)
4830 		uvm_unmap_detach(dead_entries, 0);
4831 
4832 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4833 }
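
/*
 * Usage sketch (illustrative only, not compiled): most callers go
 * through the uvm_unmap() convenience form -- conventionally
 * uvm_unmap1() with flags == 0, per uvm_map.h -- on an aligned range
 * they have already validated:
 *
 *	uvm_unmap(map, trunc_page(start), round_page(end));
 */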
4834 
4835 
4836 /*
4837  * uvm_map_reference: add reference to a map
4838  *
4839  * => map need not be locked
4840  */
4841 
4842 void
4843 uvm_map_reference(struct vm_map *map)
4844 {
4845 
4846 	atomic_inc_uint(&map->ref_count);
4847 }
4848 
4849 void
4850 uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
4851 {
4852 
4853 	if (entry->aref.ar_amap != NULL) {
4854 		amap_lock(entry->aref.ar_amap, op);
4855 	}
4856 	if (UVM_ET_ISOBJ(entry)) {
4857 		rw_enter(entry->object.uvm_obj->vmobjlock, op);
4858 	}
4859 }
4860 
4861 void
4862 uvm_map_unlock_entry(struct vm_map_entry *entry)
4863 {
4864 
4865 	if (UVM_ET_ISOBJ(entry)) {
4866 		rw_exit(entry->object.uvm_obj->vmobjlock);
4867 	}
4868 	if (entry->aref.ar_amap != NULL) {
4869 		amap_unlock(entry->aref.ar_amap);
4870 	}
4871 }
4872 
4873 #define	UVM_VOADDR_TYPE_MASK	0x3UL
4874 #define	UVM_VOADDR_TYPE_UOBJ	0x1UL
4875 #define	UVM_VOADDR_TYPE_ANON	0x2UL
4876 #define	UVM_VOADDR_OBJECT_MASK	~UVM_VOADDR_TYPE_MASK
4877 
4878 #define	UVM_VOADDR_GET_TYPE(voa)					\
4879 	((voa)->object & UVM_VOADDR_TYPE_MASK)
4880 #define	UVM_VOADDR_GET_OBJECT(voa)					\
4881 	((voa)->object & UVM_VOADDR_OBJECT_MASK)
4882 #define	UVM_VOADDR_SET_OBJECT(voa, obj, type)				\
4883 do {									\
4884 	KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0);	\
4885 	(voa)->object = ((uintptr_t)(obj)) | (type);			\
4886 } while (/*CONSTCOND*/0)
4887 
4888 #define	UVM_VOADDR_GET_UOBJ(voa)					\
4889 	((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa))
4890 #define	UVM_VOADDR_SET_UOBJ(voa, uobj)					\
4891 	UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ)
4892 
4893 #define	UVM_VOADDR_GET_ANON(voa)					\
4894 	((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa))
4895 #define	UVM_VOADDR_SET_ANON(voa, anon)					\
4896 	UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
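
/*
 * Worked example of the tagging scheme above: uvm_object and vm_anon
 * pointers are at least 4-byte aligned (the SET_OBJECT KASSERT
 * enforces this), so the low two bits of voa->object are free to
 * encode the type:
 *
 *	struct uvm_voaddr voa;
 *	UVM_VOADDR_SET_ANON(&voa, anon);
 *	KASSERT(UVM_VOADDR_GET_TYPE(&voa) == UVM_VOADDR_TYPE_ANON);
 *	KASSERT(UVM_VOADDR_GET_ANON(&voa) == anon);
 */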
4897 
4898 /*
4899  * uvm_voaddr_acquire: returns the virtual object address corresponding
4900  * to the specified virtual address.
4901  *
4902  * => resolves COW so the true page identity is tracked.
4903  *
4904  * => acquires a reference on the page's owner (uvm_object or vm_anon)
4905  */
4906 bool
4907 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
4908     struct uvm_voaddr * const voaddr)
4909 {
4910 	struct vm_map_entry *entry;
4911 	struct vm_anon *anon = NULL;
4912 	bool result = false;
4913 	bool exclusive = false;
4914 	void (*unlock_fn)(struct vm_map *);
4915 
4916 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4917 	UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
4918 
4919 	const vaddr_t start = trunc_page(va);
4920 	const vaddr_t end = round_page(va+1);
4921 
4922  lookup_again:
4923 	if (__predict_false(exclusive)) {
4924 		vm_map_lock(map);
4925 		unlock_fn = vm_map_unlock;
4926 	} else {
4927 		vm_map_lock_read(map);
4928 		unlock_fn = vm_map_unlock_read;
4929 	}
4930 
4931 	if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
4932 		unlock_fn(map);
4933 		UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
4934 		return false;
4935 	}
4936 
4937 	if (__predict_false(entry->protection == VM_PROT_NONE)) {
4938 		unlock_fn(map);
4939 		UVMHIST_LOG(maphist,"<- done (PROT_NONE)",0,0,0,0);
4940 		return false;
4941 	}
4942 
4943 	/*
4944 	 * We have a fast path for the common case of "no COW resolution
4945 	 * needed" whereby we have taken a read lock on the map and if
4946 	 * we don't encounter any need to create a vm_anon then great!
4947 	 * But if we do, we loop around again, instead taking an exclusive
4948 	 * lock so that we can perform the fault.
4949 	 *
4950 	 * In the event that we have to resolve the fault, we do nearly the
4951 	 * same work as uvm_map_pageable() does:
4952 	 *
4953 	 * 1: holding the write lock, we create any anonymous maps that need
4954 	 *    to be created.  however, we do NOT need to clip the map entries
4955 	 *    in this case.
4956 	 *
4957 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
4958 	 *    in the page (assuming the entry is not already wired).  this
4959 	 *    is done because we need the vm_anon to be present.
4960 	 */
4961 	if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
4962 
4963 		bool need_fault = false;
4964 
4965 		/*
4966 		 * perform the actions of vm_map_lookup that need the
4967 		 * write lock on the map: create an anonymous map for
4968 		 * a copy-on-write region, or an anonymous map for
4969 		 * a zero-fill region.
4970 		 */
4971 		if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
4972 			unlock_fn(map);
4973 			UVMHIST_LOG(maphist,"<- done (submap)",0,0,0,0);
4974 			return false;
4975 		}
4976 		if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
4977 		    ((entry->max_protection & VM_PROT_WRITE) ||
4978 		     (entry->object.uvm_obj == NULL)))) {
4979 			if (!exclusive) {
4980 				/* need to take the slow path */
4981 				KASSERT(unlock_fn == vm_map_unlock_read);
4982 				vm_map_unlock_read(map);
4983 				exclusive = true;
4984 				goto lookup_again;
4985 			}
4986 			need_fault = true;
4987 			amap_copy(map, entry, 0, start, end);
4988 			/* XXXCDC: wait OK? */
4989 		}
4990 
4991 		/*
4992 		 * do a quick check to see if the fault has already
4993 		 * been resolved to the upper layer.
4994 		 */
4995 		if (__predict_true(entry->aref.ar_amap != NULL &&
4996 				   need_fault == false)) {
4997 			amap_lock(entry->aref.ar_amap, RW_WRITER);
4998 			anon = amap_lookup(&entry->aref, start - entry->start);
4999 			if (__predict_true(anon != NULL)) {
5000 				/* amap unlocked below */
5001 				goto found_anon;
5002 			}
5003 			amap_unlock(entry->aref.ar_amap);
5004 			need_fault = true;
5005 		}
5006 
5007 		/*
5008 		 * we predict this test as false because if we reach
5009 		 * this point, then we are likely dealing with a
5010 		 * shared memory region backed by a uvm_object, in
5011 		 * which case a fault to create the vm_anon is not
5012 		 * necessary.
5013 		 */
5014 		if (__predict_false(need_fault)) {
5015 			if (exclusive) {
5016 				vm_map_busy(map);
5017 				vm_map_unlock(map);
5018 				unlock_fn = vm_map_unbusy;
5019 			}
5020 
5021 			if (uvm_fault_wire(map, start, end,
5022 					   entry->max_protection, 1)) {
5023 				/* wiring failed */
5024 				unlock_fn(map);
5025 				UVMHIST_LOG(maphist,"<- done (wire failed)",
5026 					    0,0,0,0);
5027 				return false;
5028 			}
5029 
5030 			/*
5031 			 * now that we have resolved the fault, we can unwire
5032 			 * the page.
5033 			 */
5034 			if (exclusive) {
5035 				vm_map_lock(map);
5036 				vm_map_unbusy(map);
5037 				unlock_fn = vm_map_unlock;
5038 			}
5039 
5040 			uvm_fault_unwire_locked(map, start, end);
5041 		}
5042 	}
5043 
5044 	/* check the upper layer */
5045 	if (entry->aref.ar_amap) {
5046 		amap_lock(entry->aref.ar_amap, RW_WRITER);
5047 		anon = amap_lookup(&entry->aref, start - entry->start);
5048 		if (anon) {
5049  found_anon:		KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
5050 			anon->an_ref++;
5051 			rw_obj_hold(anon->an_lock);
5052 			KASSERT(anon->an_ref != 0);
5053 			UVM_VOADDR_SET_ANON(voaddr, anon);
5054 			voaddr->offset = va & PAGE_MASK;
5055 			result = true;
5056 		}
5057 		amap_unlock(entry->aref.ar_amap);
5058 	}
5059 
5060 	/* check the lower layer */
5061 	if (!result && UVM_ET_ISOBJ(entry)) {
5062 		struct uvm_object *uobj = entry->object.uvm_obj;
5063 
5064 		KASSERT(uobj != NULL);
5065 		(*uobj->pgops->pgo_reference)(uobj);
5066 		UVM_VOADDR_SET_UOBJ(voaddr, uobj);
5067 		voaddr->offset = entry->offset + (va - entry->start);
5068 		result = true;
5069 	}
5070 
5071 	unlock_fn(map);
5072 
5073 	if (result) {
5074 		UVMHIST_LOG(maphist,
5075 		    "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
5076 		    UVM_VOADDR_GET_TYPE(voaddr),
5077 		    UVM_VOADDR_GET_OBJECT(voaddr),
5078 		    voaddr->offset, 0);
5079 	} else {
5080 		UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
5081 	}
5082 
5083 	return result;
5084 }
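
/*
 * Usage sketch (illustrative only, not compiled): a consumer -- the
 * futex code is one such -- brackets its use of the stable identity
 * with acquire/release and matches entries via uvm_voaddr_compare():
 *
 *	struct uvm_voaddr voa;
 *	if (!uvm_voaddr_acquire(&p->p_vmspace->vm_map, uaddr, &voa))
 *		return EFAULT;
 *	... key a lookup structure on voa via uvm_voaddr_compare() ...
 *	uvm_voaddr_release(&voa);
 */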
5085 
5086 /*
5087  * uvm_voaddr_release: release the references held by the
5088  * virtual object address.
5089  */
5090 void
5091 uvm_voaddr_release(struct uvm_voaddr * const voaddr)
5092 {
5093 
5094 	switch (UVM_VOADDR_GET_TYPE(voaddr)) {
5095 	case UVM_VOADDR_TYPE_UOBJ: {
5096 		struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr);
5097 
5098 		KASSERT(uobj != NULL);
5099 		KASSERT(uobj->pgops->pgo_detach != NULL);
5100 		(*uobj->pgops->pgo_detach)(uobj);
5101 		break;
5102 	    }
5103 	case UVM_VOADDR_TYPE_ANON: {
5104 		struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr);
5105 		krwlock_t *lock;
5106 
5107 		KASSERT(anon != NULL);
5108 		rw_enter((lock = anon->an_lock), RW_WRITER);
5109 		KASSERT(anon->an_ref > 0);
5110 		if (--anon->an_ref == 0) {
5111 			uvm_anfree(anon);
5112 		}
5113 		rw_exit(lock);
5114 		rw_obj_free(lock);
5115 		break;
5116 	    }
5117 	default:
5118 		panic("uvm_voaddr_release: bad type");
5119 	}
5120 	memset(voaddr, 0, sizeof(*voaddr));
5121 }
5122 
5123 /*
5124  * uvm_voaddr_compare: compare two uvm_voaddr objects.
5125  *
5126  * => memcmp() semantics
5127  */
5128 int
5129 uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1,
5130     const struct uvm_voaddr * const voaddr2)
5131 {
5132 	const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1);
5133 	const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2);
5134 
5135 	KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ ||
5136 		type1 == UVM_VOADDR_TYPE_ANON);
5137 
5138 	KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ ||
5139 		type2 == UVM_VOADDR_TYPE_ANON);
5140 
5141 	if (type1 < type2)
5142 		return -1;
5143 	if (type1 > type2)
5144 		return 1;
5145 
5146 	const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1);
5147 	const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2);
5148 
5149 	if (addr1 < addr2)
5150 		return -1;
5151 	if (addr1 > addr2)
5152 		return 1;
5153 
5154 	if (voaddr1->offset < voaddr2->offset)
5155 		return -1;
5156 	if (voaddr1->offset > voaddr2->offset)
5157 		return 1;
5158 
5159 	return 0;
5160 }
5161 
5162 #if defined(DDB) || defined(DEBUGPRINT)
5163 
5164 /*
5165  * uvm_map_printit: actually prints the map
5166  */
5167 
5168 void
5169 uvm_map_printit(struct vm_map *map, bool full,
5170     void (*pr)(const char *, ...))
5171 {
5172 	struct vm_map_entry *entry;
5173 
5174 	(*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
5175 	    vm_map_max(map));
5176 	(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
5177 	    map->nentries, map->size, map->ref_count, map->timestamp,
5178 	    map->flags);
5179 	(*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5180 	    pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5181 	if (!full)
5182 		return;
5183 	for (entry = map->header.next; entry != &map->header;
5184 	    entry = entry->next) {
5185 		(*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
5186 		    entry, entry->start, entry->end, entry->object.uvm_obj,
5187 		    (long long)entry->offset, entry->aref.ar_amap,
5188 		    entry->aref.ar_pageoff);
5189 		(*pr)(
5190 		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5191 		    "wc=%d, adv=%d%s\n",
5192 		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5193 		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5194 		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5195 		    entry->protection, entry->max_protection,
5196 		    entry->inheritance, entry->wired_count, entry->advice,
5197 		    entry == map->first_free ? " (first_free)" : "");
5198 	}
5199 }
5200 
5201 void
5202 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5203 {
5204 	struct vm_map *map;
5205 
5206 	for (map = kernel_map;;) {
5207 		struct vm_map_entry *entry;
5208 
5209 		if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5210 			break;
5211 		}
5212 		(*pr)("%p is %p+%zu from VMMAP %p\n",
5213 		    (void *)addr, (void *)entry->start,
5214 		    (size_t)(addr - (uintptr_t)entry->start), map);
5215 		if (!UVM_ET_ISSUBMAP(entry)) {
5216 			break;
5217 		}
5218 		map = entry->object.sub_map;
5219 	}
5220 }
5221 
5222 #endif /* DDB || DEBUGPRINT */
5223 
5224 #ifndef __USER_VA0_IS_SAFE
5225 static int
5226 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5227 {
5228 	struct sysctlnode node;
5229 	int t, error;
5230 
5231 	node = *rnode;
5232 	node.sysctl_data = &t;
5233 	t = user_va0_disable;
5234 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
5235 	if (error || newp == NULL)
5236 		return (error);
5237 
5238 	if (!t && user_va0_disable &&
5239 	    kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
5240 	    NULL, NULL, NULL))
5241 		return EPERM;
5242 
5243 	user_va0_disable = !!t;
5244 	return 0;
5245 }
5246 #endif
5247 
5248 static int
5249 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
5250     struct vm_map *m, struct vm_map_entry *e)
5251 {
5252 #ifndef _RUMPKERNEL
5253 	int error;
5254 
5255 	memset(kve, 0, sizeof(*kve));
5256 	KASSERT(e != NULL);
5257 	if (UVM_ET_ISOBJ(e)) {
5258 		struct uvm_object *uobj = e->object.uvm_obj;
5259 		KASSERT(uobj != NULL);
5260 		kve->kve_ref_count = uobj->uo_refs;
5261 		kve->kve_count = uobj->uo_npages;
5262 		if (UVM_OBJ_IS_VNODE(uobj)) {
5263 			struct vattr va;
5264 			struct vnode *vp = (struct vnode *)uobj;
5265 			vn_lock(vp, LK_SHARED | LK_RETRY);
5266 			error = VOP_GETATTR(vp, &va, l->l_cred);
5267 			VOP_UNLOCK(vp);
5268 			kve->kve_type = KVME_TYPE_VNODE;
5269 			if (error == 0) {
5270 				kve->kve_vn_size = vp->v_size;
5271 				kve->kve_vn_type = (int)vp->v_type;
5272 				kve->kve_vn_mode = va.va_mode;
5273 				kve->kve_vn_rdev = va.va_rdev;
5274 				kve->kve_vn_fileid = va.va_fileid;
5275 				kve->kve_vn_fsid = va.va_fsid;
5276 				error = vnode_to_path(kve->kve_path,
5277 				    sizeof(kve->kve_path) / 2, vp, l, p);
5278 			}
5279 		} else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
5280 			kve->kve_type = KVME_TYPE_KERN;
5281 		} else if (UVM_OBJ_IS_DEVICE(uobj)) {
5282 			kve->kve_type = KVME_TYPE_DEVICE;
5283 		} else if (UVM_OBJ_IS_AOBJ(uobj)) {
5284 			kve->kve_type = KVME_TYPE_ANON;
5285 		} else {
5286 			kve->kve_type = KVME_TYPE_OBJECT;
5287 		}
5288 	} else if (UVM_ET_ISSUBMAP(e)) {
5289 		struct vm_map *map = e->object.sub_map;
5290 		KASSERT(map != NULL);
5291 		kve->kve_ref_count = map->ref_count;
5292 		kve->kve_count = map->nentries;
5293 		kve->kve_type = KVME_TYPE_SUBMAP;
5294 	} else
5295 		kve->kve_type = KVME_TYPE_UNKNOWN;
5296 
5297 	kve->kve_start = e->start;
5298 	kve->kve_end = e->end;
5299 	kve->kve_offset = e->offset;
5300 	kve->kve_wired_count = e->wired_count;
5301 	kve->kve_inheritance = e->inheritance;
5302 	kve->kve_attributes = 0; /* unused */
5303 	kve->kve_advice = e->advice;
5304 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
5305 	(((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
5306 	(((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
5307 	kve->kve_protection = PROT(e->protection);
5308 	kve->kve_max_protection = PROT(e->max_protection);
5309 	kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
5310 	    ? KVME_FLAG_COW : 0;
5311 	kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
5312 	    ? KVME_FLAG_NEEDS_COPY : 0;
5313 	kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
5314 	    ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
5315 	kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
5316 	    ? KVME_FLAG_PAGEABLE : 0;
5317 #endif
5318 	return 0;
5319 }
5320 
5321 static int
5322 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
5323     size_t *oldlenp)
5324 {
5325 	int error;
5326 	struct proc *p;
5327 	struct kinfo_vmentry *vme;
5328 	struct vmspace *vm;
5329 	struct vm_map *map;
5330 	struct vm_map_entry *entry;
5331 	char *dp;
5332 	size_t count, vmesize;
5333 
5334 	if (elem_size == 0 || elem_size > 2 * sizeof(*vme))
5335 		return EINVAL;
5336 
5337 	if (oldp) {
5338 		if (*oldlenp > 10UL * 1024UL * 1024UL)
5339 			return E2BIG;
5340 		count = *oldlenp / elem_size;
5341 		if (count == 0)
5342 			return ENOMEM;
5343 		vmesize = count * sizeof(*vme);
5344 	} else
5345 		vmesize = 0;
5346 
5347 	if ((error = proc_find_locked(l, &p, pid)) != 0)
5348 		return error;
5349 
5350 	vme = NULL;
5351 	count = 0;
5352 
5353 	if ((error = proc_vmspace_getref(p, &vm)) != 0)
5354 		goto out;
5355 
5356 	map = &vm->vm_map;
5357 	vm_map_lock_read(map);
5358 
5359 	dp = oldp;
5360 	if (oldp)
5361 		vme = kmem_alloc(vmesize, KM_SLEEP);
5362 	for (entry = map->header.next; entry != &map->header;
5363 	    entry = entry->next) {
5364 		if (oldp && (dp - (char *)oldp) < vmesize) {
5365 			error = fill_vmentry(l, p, &vme[count], map, entry);
5366 			if (error)
5367 				goto out;
5368 			dp += elem_size;
5369 		}
5370 		count++;
5371 	}
5372 	vm_map_unlock_read(map);
5373 	uvmspace_free(vm);
5374 
5375 out:
5376 	if (pid != -1)
5377 		mutex_exit(p->p_lock);
5378 	if (error == 0) {
5379 		const u_int esize = uimin(sizeof(*vme), elem_size);
5380 		dp = oldp;
5381 		for (size_t i = 0; i < count; i++) {
5382 			if (oldp && (dp - (char *)oldp) < vmesize) {
5383 				error = sysctl_copyout(l, &vme[i], dp, esize);
5384 				if (error)
5385 					break;
5386 				dp += elem_size;
5387 			} else
5388 				break;
5389 		}
5390 		count *= elem_size;
5391 		if (oldp != NULL && *oldlenp < count)
5392 			error = ENOSPC;
5393 		*oldlenp = count;
5394 	}
5395 	if (vme)
5396 		kmem_free(vme, vmesize);
5397 	return error;
5398 }
5399 
5400 static int
5401 sysctl_vmproc(SYSCTLFN_ARGS)
5402 {
5403 	int error;
5404 
5405 	if (namelen == 1 && name[0] == CTL_QUERY)
5406 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
5407 
5408 	if (namelen == 0)
5409 		return EINVAL;
5410 
5411 	switch (name[0]) {
5412 	case VM_PROC_MAP:
5413 		if (namelen != 3)
5414 			return EINVAL;
5415 		sysctl_unlock();
5416 		error = fill_vmentries(l, name[1], name[2], oldp, oldlenp);
5417 		sysctl_relock();
5418 		return error;
5419 	default:
5420 		return EINVAL;
5421 	}
5422 }
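
/*
 * Usage sketch (illustrative only, not compiled): from userland the
 * map of a process is fetched with a five-component MIB; the element
 * size travels as the last name component so old and new kinfo_vmentry
 * layouts can coexist.  The first call probes the needed length, the
 * second fetches the entries:
 *
 *	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, pid,
 *	    sizeof(struct kinfo_vmentry) };
 *	size_t len;
 *	sysctl(mib, 5, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(mib, 5, buf, &len, NULL, 0);
 */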
5423 
5424 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5425 {
5426 
5427 	sysctl_createv(clog, 0, NULL, NULL,
5428 		       CTLFLAG_PERMANENT,
5429 		       CTLTYPE_STRUCT, "proc",
5430 		       SYSCTL_DESCR("Process vm information"),
5431 		       sysctl_vmproc, 0, NULL, 0,
5432 		       CTL_VM, VM_PROC, CTL_EOL);
5433 #ifndef __USER_VA0_IS_SAFE
5434 	sysctl_createv(clog, 0, NULL, NULL,
5435 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5436 		       CTLTYPE_INT, "user_va0_disable",
5437 		       SYSCTL_DESCR("Disable VA 0"),
5438 		       sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5439 		       CTL_VM, CTL_CREATE, CTL_EOL);
5440 #endif
5441 }
5442