1 /*	$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993, The Regents of the University of California.
6  *
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
37  * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
38  *
39  *
40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 
64 /*
65  * uvm_map.c: uvm map operations
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $");
70 
71 #include "opt_ddb.h"
72 #include "opt_pax.h"
73 #include "opt_uvmhist.h"
74 #include "opt_uvm.h"
75 #include "opt_sysv.h"
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/mman.h>
80 #include <sys/proc.h>
81 #include <sys/pool.h>
82 #include <sys/kernel.h>
83 #include <sys/mount.h>
84 #include <sys/pax.h>
85 #include <sys/vnode.h>
86 #include <sys/filedesc.h>
87 #include <sys/lockdebug.h>
88 #include <sys/atomic.h>
89 #include <sys/sysctl.h>
90 #ifndef __USER_VA0_IS_SAFE
91 #include <sys/kauth.h>
92 #include "opt_user_va0_disable_default.h"
93 #endif
94 
95 #include <sys/shm.h>
96 
97 #include <uvm/uvm.h>
98 #include <uvm/uvm_readahead.h>
99 
100 #if defined(DDB) || defined(DEBUGPRINT)
101 #include <uvm/uvm_ddb.h>
102 #endif
103 
104 #ifdef UVMHIST
105 #ifndef UVMHIST_MAPHIST_SIZE
106 #define UVMHIST_MAPHIST_SIZE 100
107 #endif
108 #ifndef UVMHIST_PDHIST_SIZE
109 #define UVMHIST_PDHIST_SIZE 100
110 #endif
111 static struct kern_history_ent maphistbuf[UVMHIST_MAPHIST_SIZE];
112 UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
113 #endif
114 
115 #if !defined(UVMMAP_COUNTERS)
116 
117 #define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
118 #define UVMMAP_EVCNT_INCR(ev)		/* nothing */
119 #define UVMMAP_EVCNT_DECR(ev)		/* nothing */
120 
121 #else /* defined(UVMMAP_COUNTERS) */
122 
123 #include <sys/evcnt.h>
124 #define	UVMMAP_EVCNT_DEFINE(name) \
125 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
126     "uvmmap", #name); \
127 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
128 #define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
129 #define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--
130 
131 #endif /* defined(UVMMAP_COUNTERS) */
132 
133 UVMMAP_EVCNT_DEFINE(ubackmerge)
134 UVMMAP_EVCNT_DEFINE(uforwmerge)
135 UVMMAP_EVCNT_DEFINE(ubimerge)
136 UVMMAP_EVCNT_DEFINE(unomerge)
137 UVMMAP_EVCNT_DEFINE(kbackmerge)
138 UVMMAP_EVCNT_DEFINE(kforwmerge)
139 UVMMAP_EVCNT_DEFINE(kbimerge)
140 UVMMAP_EVCNT_DEFINE(knomerge)
141 UVMMAP_EVCNT_DEFINE(map_call)
142 UVMMAP_EVCNT_DEFINE(mlk_call)
143 UVMMAP_EVCNT_DEFINE(mlk_hint)
144 UVMMAP_EVCNT_DEFINE(mlk_tree)
145 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
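
/*
 * An illustrative expansion of the above (a sketch, assuming
 * UVMMAP_COUNTERS is defined): UVMMAP_EVCNT_DEFINE(ubackmerge)
 * becomes, roughly,
 *
 *	struct evcnt uvmmap_evcnt_ubackmerge =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
 *	    "uvmmap", "ubackmerge");
 *	EVCNT_ATTACH_STATIC(uvmmap_evcnt_ubackmerge);
 *
 * The attached counters can then be inspected at run time with
 * vmstat(1)'s -e option.
 */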
146 
147 const char vmmapbsy[] = "vmmapbsy";
148 
149 /*
150  * cache for vmspace structures.
151  */
152 
153 static struct pool_cache uvm_vmspace_cache;
154 
155 /*
156  * cache for dynamically-allocated map entries.
157  */
158 
159 static struct pool_cache uvm_map_entry_cache;
160 
161 #ifdef PMAP_GROWKERNEL
162 /*
163  * This global represents the end of the kernel virtual address
164  * space.  If we want to exceed this, we must grow the kernel
165  * virtual address space dynamically.
166  *
167  * Note, this variable is locked by kernel_map's lock.
168  */
169 vaddr_t uvm_maxkaddr;
170 #endif
171 
172 #ifndef __USER_VA0_IS_SAFE
173 #ifndef __USER_VA0_DISABLE_DEFAULT
174 #define __USER_VA0_DISABLE_DEFAULT 1
175 #endif
176 #ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
177 #undef __USER_VA0_DISABLE_DEFAULT
178 #define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
179 #endif
180 int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
181 #endif
182 
183 /*
184  * macros
185  */
186 
187 /*
188  * uvm_map_align_va: round down or up virtual address
189  */
190 static __inline void
191 uvm_map_align_va(vaddr_t *vap, vsize_t align, int topdown)
192 {
193 
194 	KASSERT(powerof2(align));
195 
196 	if (align != 0 && (*vap & (align - 1)) != 0) {
197 		if (topdown)
198 			*vap = rounddown2(*vap, align);
199 		else
200 			*vap = roundup2(*vap, align);
201 	}
202 }
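
/*
 * Illustrative example (a sketch, not compiled): with a 64KB
 * alignment, the rounding direction depends on the map's growth
 * direction:
 *
 *	vaddr_t va = 0x2345678;
 *	uvm_map_align_va(&va, 0x10000, 1);	topdown: va == 0x2340000
 *
 *	va = 0x2345678;
 *	uvm_map_align_va(&va, 0x10000, 0);	bottom-up: va == 0x2350000
 *
 * A topdown search walks from high to low addresses, so rounding
 * down keeps the candidate inside the gap being examined; a
 * bottom-up search rounds up for the same reason.
 */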
203 
204 /*
205  * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
206  */
207 extern struct vm_map *pager_map;
208 
209 #define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
210     prot, maxprot, inh, adv, wire) \
211 	((ent)->etype == (type) && \
212 	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
213 	(ent)->object.uvm_obj == (uobj) && \
214 	(ent)->protection == (prot) && \
215 	(ent)->max_protection == (maxprot) && \
216 	(ent)->inheritance == (inh) && \
217 	(ent)->advice == (adv) && \
218 	(ent)->wired_count == (wire))
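
/*
 * A sketch of how uvm_map_enter() below applies this check when
 * deciding whether a new mapping can be merged into its neighbor
 * (the back-merge test; the names are uvm_map_enter() locals):
 *
 *	if (prev_entry->end == start &&
 *	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
 *	    prot, maxprot, inherit, advice, 0))
 *		... try to extend prev_entry instead of allocating ...
 *
 * i.e. only an unwired neighbor of the same type, object, protection,
 * inheritance and advice is a merge candidate.
 */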
219 
220 /*
221  * uvm_map_entry_link: insert entry into a map
222  *
223  * => map must be locked
224  */
225 #define uvm_map_entry_link(map, after_where, entry) do { \
226 	uvm_mapent_check(entry); \
227 	(map)->nentries++; \
228 	(entry)->prev = (after_where); \
229 	(entry)->next = (after_where)->next; \
230 	(entry)->prev->next = (entry); \
231 	(entry)->next->prev = (entry); \
232 	uvm_rb_insert((map), (entry)); \
233 } while (/*CONSTCOND*/ 0)
234 
235 /*
236  * uvm_map_entry_unlink: remove entry from a map
237  *
238  * => map must be locked
239  */
240 #define uvm_map_entry_unlink(map, entry) do { \
241 	KASSERT((entry) != (map)->first_free); \
242 	KASSERT((entry) != (map)->hint); \
243 	uvm_mapent_check(entry); \
244 	(map)->nentries--; \
245 	(entry)->next->prev = (entry)->prev; \
246 	(entry)->prev->next = (entry)->next; \
247 	uvm_rb_remove((map), (entry)); \
248 } while (/*CONSTCOND*/ 0)
249 
250 /*
251  * SAVE_HINT: saves the specified entry as the hint for future lookups.
252  *
253  * => map need not be locked.
254  */
255 #define SAVE_HINT(map, check, value) do { \
256 	if ((map)->hint == (check)) \
257 		(map)->hint = (value); \
258 } while (/*CONSTCOND*/ 0)
259 
260 /*
261  * clear_hints: ensure that hints don't point to the entry.
262  *
263  * => map must be write-locked.
264  */
265 static void
266 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
267 {
268 
269 	SAVE_HINT(map, ent, ent->prev);
270 	if (map->first_free == ent) {
271 		map->first_free = ent->prev;
272 	}
273 }
274 
275 /*
276  * VM_MAP_RANGE_CHECK: check and correct range
277  *
278  * => map must at least be read locked
279  */
280 
281 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
282 	if (start < vm_map_min(map))		\
283 		start = vm_map_min(map);	\
284 	if (end > vm_map_max(map))		\
285 		end = vm_map_max(map);		\
286 	if (start > end)			\
287 		start = end;			\
288 } while (/*CONSTCOND*/ 0)
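
/*
 * A worked example (illustrative): for a map spanning
 * [0x1000, 0xf000), VM_MAP_RANGE_CHECK clamps a caller-supplied
 * range of [0x0, 0x10000) to [0x1000, 0xf000), and a range lying
 * entirely above the map, such as [0x20000, 0x30000), collapses
 * to the empty range [0xf000, 0xf000).
 */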
289 
290 /*
291  * local prototypes
292  */
293 
294 static struct vm_map_entry *
295 		uvm_mapent_alloc(struct vm_map *, int);
296 static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
297 static void	uvm_mapent_free(struct vm_map_entry *);
298 #if defined(DEBUG)
299 static void	_uvm_mapent_check(const struct vm_map_entry *, int);
300 #define	uvm_mapent_check(map)	_uvm_mapent_check(map, __LINE__)
301 #else /* defined(DEBUG) */
302 #define	uvm_mapent_check(e)	/* nothing */
303 #endif /* defined(DEBUG) */
304 
305 static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
306 static void	uvm_map_reference_amap(struct vm_map_entry *, int);
307 static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
308 		    int, struct vm_map_entry *);
309 static void	uvm_map_unreference_amap(struct vm_map_entry *, int);
310 
311 int _uvm_map_sanity(struct vm_map *);
312 int _uvm_tree_sanity(struct vm_map *);
313 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
314 
315 #define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
316 #define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
317 #define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
318 #define	PARENT_ENTRY(map, entry) \
319 	(ROOT_ENTRY(map) == (entry) \
320 	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
321 
322 /*
323  * These get filled in if/when SYSVSHM shared memory code is loaded
324  *
325  * We do this with function pointers rather than #ifdef SYSVSHM so the
326  * SYSVSHM code can be loaded and unloaded.
327  */
328 void (*uvm_shmexit)(struct vmspace *) = NULL;
329 void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;
330 
331 static int
332 uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
333 {
334 	const struct vm_map_entry *eparent = nparent;
335 	const struct vm_map_entry *ekey = nkey;
336 
337 	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
338 	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
339 
340 	if (eparent->start < ekey->start)
341 		return -1;
342 	if (eparent->end >= ekey->start)
343 		return 1;
344 	return 0;
345 }
346 
347 static int
348 uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
349 {
350 	const struct vm_map_entry *eparent = nparent;
351 	const vaddr_t va = *(const vaddr_t *) vkey;
352 
353 	if (eparent->start < va)
354 		return -1;
355 	if (eparent->end >= va)
356 		return 1;
357 	return 0;
358 }
359 
360 static const rb_tree_ops_t uvm_map_tree_ops = {
361 	.rbto_compare_nodes = uvm_map_compare_nodes,
362 	.rbto_compare_key = uvm_map_compare_key,
363 	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
364 	.rbto_context = NULL
365 };
366 
367 /*
368  * uvm_rb_gap: return the gap size between our entry and next entry.
369  */
370 static inline vsize_t
371 uvm_rb_gap(const struct vm_map_entry *entry)
372 {
373 
374 	KASSERT(entry->next != NULL);
375 	return entry->next->start - entry->end;
376 }
377 
378 static vsize_t
379 uvm_rb_maxgap(const struct vm_map_entry *entry)
380 {
381 	struct vm_map_entry *child;
382 	vsize_t maxgap = entry->gap;
383 
384 	/*
385 	 * We need maxgap to be the largest gap of this entry or any of
386 	 * its descendants.  Since each child's maxgap is the cached
387 	 * value of the largest gap among itself and its descendants,
388 	 * we can just use that value and avoid recursing down the
389 	 * tree to calculate it.
390 	 */
391 	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
392 		maxgap = child->maxgap;
393 
394 	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
395 		maxgap = child->maxgap;
396 
397 	return maxgap;
398 }
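
/*
 * A sketch of the cached-gap invariant (illustrative): for entries
 * A [0x1000,0x2000), B [0x3000,0x4000) and C [0x8000,0x9000) in
 * address order,
 *
 *	A->gap == 0x3000 - 0x2000 == 0x1000
 *	B->gap == 0x8000 - 0x4000 == 0x4000
 *
 * and if B is the tree parent of A and C, B->maxgap is the maximum
 * of B->gap and both children's maxgap values.  A search for a hole
 * of a given size can therefore skip any subtree whose root's maxgap
 * is too small, instead of walking every entry.
 */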
399 
400 static void
401 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
402 {
403 	struct vm_map_entry *parent;
404 
405 	KASSERT(entry->gap == uvm_rb_gap(entry));
406 	entry->maxgap = uvm_rb_maxgap(entry);
407 
408 	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
409 		struct vm_map_entry *brother;
410 		vsize_t maxgap = parent->gap;
411 		unsigned int which;
412 
413 		KDASSERT(parent->gap == uvm_rb_gap(parent));
414 		if (maxgap < entry->maxgap)
415 			maxgap = entry->maxgap;
416 		/*
417 		 * Since we work towards the root, we know entry's maxgap
418 		 * value is OK, but its brother's value may now be out-of-date
419 		 * due to rebalancing.  So refresh it.
420 		 */
421 		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
422 		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
423 		if (brother != NULL) {
424 			KDASSERT(brother->gap == uvm_rb_gap(brother));
425 			brother->maxgap = uvm_rb_maxgap(brother);
426 			if (maxgap < brother->maxgap)
427 				maxgap = brother->maxgap;
428 		}
429 
430 		parent->maxgap = maxgap;
431 		entry = parent;
432 	}
433 }
434 
435 static void
436 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
437 {
438 	struct vm_map_entry *ret __diagused;
439 
440 	entry->gap = entry->maxgap = uvm_rb_gap(entry);
441 	if (entry->prev != &map->header)
442 		entry->prev->gap = uvm_rb_gap(entry->prev);
443 
444 	ret = rb_tree_insert_node(&map->rb_tree, entry);
445 	KASSERTMSG(ret == entry,
446 	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);
447 
448 	/*
449 	 * If the previous entry is not our immediate left child, then it's an
450 	 * ancestor and will be fixed up on the way to the root.  We don't
451 	 * have to check entry->prev against &map->header since &map->header
452 	 * will never be in the tree.
453 	 */
454 	uvm_rb_fixup(map,
455 	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
456 }
457 
458 static void
459 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
460 {
461 	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;
462 
463 	/*
464 	 * If we are removing an interior node, then an adjacent node will
465 	 * be used to replace its position in the tree.  Therefore we will
466 	 * need to fixup the tree starting at the parent of the replacement
467 	 * node.  So record their parents for later use.
468 	 */
469 	if (entry->prev != &map->header)
470 		prev_parent = PARENT_ENTRY(map, entry->prev);
471 	if (entry->next != &map->header)
472 		next_parent = PARENT_ENTRY(map, entry->next);
473 
474 	rb_tree_remove_node(&map->rb_tree, entry);
475 
476 	/*
477 	 * If the previous node has a new parent, fixup the tree starting
478 	 * at the previous node's old parent.
479 	 */
480 	if (entry->prev != &map->header) {
481 		/*
482 		 * Update the previous entry's gap due to our absence.
483 		 */
484 		entry->prev->gap = uvm_rb_gap(entry->prev);
485 		uvm_rb_fixup(map, entry->prev);
486 		if (prev_parent != NULL
487 		    && prev_parent != entry
488 		    && prev_parent != PARENT_ENTRY(map, entry->prev))
489 			uvm_rb_fixup(map, prev_parent);
490 	}
491 
492 	/*
493 	 * If the next node has a new parent, fixup the tree starting
494 	 * at the next node's old parent.
495 	 */
496 	if (entry->next != &map->header) {
497 		uvm_rb_fixup(map, entry->next);
498 		if (next_parent != NULL
499 		    && next_parent != entry
500 		    && next_parent != PARENT_ENTRY(map, entry->next))
501 			uvm_rb_fixup(map, next_parent);
502 	}
503 }
504 
505 #if defined(DEBUG)
506 int uvm_debug_check_map = 0;
507 int uvm_debug_check_rbtree = 0;
508 #define uvm_map_check(map, name) \
509 	_uvm_map_check((map), (name), __FILE__, __LINE__)
510 static void
511 _uvm_map_check(struct vm_map *map, const char *name,
512     const char *file, int line)
513 {
514 
515 	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
516 	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
517 		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
518 		    name, map, file, line);
519 	}
520 }
521 #else /* defined(DEBUG) */
522 #define uvm_map_check(map, name)	/* nothing */
523 #endif /* defined(DEBUG) */
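
/*
 * Note (illustrative): on a DEBUG kernel the two knobs above default
 * to off; setting them to non-zero (from a debugger, for example)
 * makes every uvm_map_check() call perform a full validation, at
 * considerable cost:
 *
 *	uvm_debug_check_map = 1;	validate the entry list
 *	uvm_debug_check_rbtree = 1;	validate the red-black tree
 */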
524 
525 #if defined(DEBUG) || defined(DDB)
526 int
527 _uvm_map_sanity(struct vm_map *map)
528 {
529 	bool first_free_found = false;
530 	bool hint_found = false;
531 	const struct vm_map_entry *e;
532 	struct vm_map_entry *hint = map->hint;
533 
534 	e = &map->header;
535 	for (;;) {
536 		if (map->first_free == e) {
537 			first_free_found = true;
538 		} else if (!first_free_found && e->next->start > e->end) {
539 			printf("first_free %p should be %p\n",
540 			    map->first_free, e);
541 			return -1;
542 		}
543 		if (hint == e) {
544 			hint_found = true;
545 		}
546 
547 		e = e->next;
548 		if (e == &map->header) {
549 			break;
550 		}
551 	}
552 	if (!first_free_found) {
553 		printf("stale first_free\n");
554 		return -1;
555 	}
556 	if (!hint_found) {
557 		printf("stale hint\n");
558 		return -1;
559 	}
560 	return 0;
561 }
562 
563 int
564 _uvm_tree_sanity(struct vm_map *map)
565 {
566 	struct vm_map_entry *tmp, *trtmp;
567 	int n = 0, i = 1;
568 
569 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
570 		if (tmp->gap != uvm_rb_gap(tmp)) {
571 			printf("%d/%d gap %#lx != %#lx %s\n",
572 			    n + 1, map->nentries,
573 			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
574 			    tmp->next == &map->header ? "(last)" : "");
575 			goto error;
576 		}
577 		/*
578 		 * If entries are out of order, the unsigned subtraction in
579 		 * uvm_rb_gap() wraps, so tmp->gap will likely exceed the map size.
580 		 */
581 		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
582 			printf("too large gap %zu\n", (size_t)tmp->gap);
583 			goto error;
584 		}
585 		n++;
586 	}
587 
588 	if (n != map->nentries) {
589 		printf("nentries: %d vs %d\n", n, map->nentries);
590 		goto error;
591 	}
592 
593 	trtmp = NULL;
594 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
595 		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
596 			printf("maxgap %#lx != %#lx\n",
597 			    (ulong)tmp->maxgap,
598 			    (ulong)uvm_rb_maxgap(tmp));
599 			goto error;
600 		}
601 		if (trtmp != NULL && trtmp->start >= tmp->start) {
602 			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
603 			    trtmp->start, tmp->start);
604 			goto error;
605 		}
606 
607 		trtmp = tmp;
608 	}
609 
610 	for (tmp = map->header.next; tmp != &map->header;
611 	    tmp = tmp->next, i++) {
612 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
613 		if (trtmp == NULL)
614 			trtmp = &map->header;
615 		if (tmp->prev != trtmp) {
616 			printf("lookup: %d: %p->prev=%p: %p\n",
617 			    i, tmp, tmp->prev, trtmp);
618 			goto error;
619 		}
620 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
621 		if (trtmp == NULL)
622 			trtmp = &map->header;
623 		if (tmp->next != trtmp) {
624 			printf("lookup: %d: %p->next=%p: %p\n",
625 			    i, tmp, tmp->next, trtmp);
626 			goto error;
627 		}
628 		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
629 		if (trtmp != tmp) {
630 			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
631 			    PARENT_ENTRY(map, tmp));
632 			goto error;
633 		}
634 	}
635 
636 	return (0);
637  error:
638 	return (-1);
639 }
640 #endif /* defined(DEBUG) || defined(DDB) */
641 
642 /*
643  * vm_map_lock: acquire an exclusive (write) lock on a map.
644  *
645  * => The locking protocol provides for guaranteed upgrade from shared ->
646  *    exclusive by whichever thread currently has the map marked busy.
647  *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
648  *    other problems, it defeats any fairness guarantees provided by RW
649  *    locks.
650  */
651 
652 void
653 vm_map_lock(struct vm_map *map)
654 {
655 
656 	for (;;) {
657 		rw_enter(&map->lock, RW_WRITER);
658 		if (map->busy == NULL || map->busy == curlwp) {
659 			break;
660 		}
661 		mutex_enter(&map->misc_lock);
662 		rw_exit(&map->lock);
663 		if (map->busy != NULL) {
664 			cv_wait(&map->cv, &map->misc_lock);
665 		}
666 		mutex_exit(&map->misc_lock);
667 	}
668 	map->timestamp++;
669 }
670 
671 /*
672  * vm_map_lock_try: try to lock a map, failing if it is already locked.
673  */
674 
675 bool
676 vm_map_lock_try(struct vm_map *map)
677 {
678 
679 	if (!rw_tryenter(&map->lock, RW_WRITER)) {
680 		return false;
681 	}
682 	if (map->busy != NULL) {
683 		rw_exit(&map->lock);
684 		return false;
685 	}
686 	map->timestamp++;
687 	return true;
688 }
689 
690 /*
691  * vm_map_unlock: release an exclusive lock on a map.
692  */
693 
694 void
695 vm_map_unlock(struct vm_map *map)
696 {
697 
698 	KASSERT(rw_write_held(&map->lock));
699 	KASSERT(map->busy == NULL || map->busy == curlwp);
700 	rw_exit(&map->lock);
701 }
702 
703 /*
704  * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
705  *     want an exclusive lock.
706  */
707 
708 void
709 vm_map_unbusy(struct vm_map *map)
710 {
711 
712 	KASSERT(map->busy == curlwp);
713 
714 	/*
715 	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
716 	 *
717 	 * o they can only be set with a write lock held
718 	 * o writers are blocked out with a read or write hold
719 	 * o at any time, only one thread owns the set of values
720 	 */
721 	mutex_enter(&map->misc_lock);
722 	map->busy = NULL;
723 	cv_broadcast(&map->cv);
724 	mutex_exit(&map->misc_lock);
725 }
726 
727 /*
728  * vm_map_lock_read: acquire a shared (read) lock on a map.
729  */
730 
731 void
732 vm_map_lock_read(struct vm_map *map)
733 {
734 
735 	rw_enter(&map->lock, RW_READER);
736 }
737 
738 /*
739  * vm_map_unlock_read: release a shared lock on a map.
740  */
741 
742 void
743 vm_map_unlock_read(struct vm_map *map)
744 {
745 
746 	rw_exit(&map->lock);
747 }
748 
749 /*
750  * vm_map_busy: mark a map as busy.
751  *
752  * => the caller must hold the map write locked
753  */
754 
755 void
756 vm_map_busy(struct vm_map *map)
757 {
758 
759 	KASSERT(rw_write_held(&map->lock));
760 	KASSERT(map->busy == NULL);
761 
762 	map->busy = curlwp;
763 }
764 
765 /*
766  * vm_map_locked_p: return true if the map is write locked.
767  *
768  * => only for debug purposes like KASSERTs.
769  * => should not be used to verify that a map is not locked.
770  */
771 
772 bool
773 vm_map_locked_p(struct vm_map *map)
774 {
775 
776 	return rw_write_held(&map->lock);
777 }
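
/*
 * A sketch of the busy/unbusy protocol implemented above
 * (illustrative only; see "LOCKING PROTOCOL NOTES" in uvm_map.h):
 *
 *	vm_map_lock(map);	take the exclusive lock
 *	vm_map_busy(map);	record curlwp as the busy holder
 *	vm_map_unlock(map);	drop the lock to do slow work (I/O etc.)
 *
 *	...			other callers of vm_map_lock() now
 *				sleep on map->cv until unbusied
 *
 *	vm_map_lock(map);	the busy holder re-enters immediately
 *	vm_map_unbusy(map);	clear busy and wake waiters
 *	vm_map_unlock(map);
 */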
778 
779 /*
780  * uvm_mapent_alloc: allocate a map entry
781  */
782 
783 static struct vm_map_entry *
784 uvm_mapent_alloc(struct vm_map *map, int flags)
785 {
786 	struct vm_map_entry *me;
787 	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
788 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
789 
790 	me = pool_cache_get(&uvm_map_entry_cache, pflags);
791 	if (__predict_false(me == NULL)) {
792 		return NULL;
793 	}
794 	me->flags = 0;
795 
796 	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
797 	    (map == kernel_map), 0, 0);
798 	return me;
799 }
800 
801 /*
802  * uvm_mapent_free: free map entry
803  */
804 
805 static void
806 uvm_mapent_free(struct vm_map_entry *me)
807 {
808 	UVMHIST_FUNC(__func__);
809 	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%jd]",
810 		(uintptr_t)me, me->flags, 0, 0);
811 	pool_cache_put(&uvm_map_entry_cache, me);
812 }
813 
814 /*
815  * uvm_mapent_copy: copy a map entry, preserving flags
816  */
817 
818 static inline void
819 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
820 {
821 
822 	memcpy(dst, src, sizeof(*dst));
823 	dst->flags = 0;
824 }
825 
826 #if defined(DEBUG)
827 static void
828 _uvm_mapent_check(const struct vm_map_entry *entry, int line)
829 {
830 
831 	if (entry->start >= entry->end) {
832 		goto bad;
833 	}
834 	if (UVM_ET_ISOBJ(entry)) {
835 		if (entry->object.uvm_obj == NULL) {
836 			goto bad;
837 		}
838 	} else if (UVM_ET_ISSUBMAP(entry)) {
839 		if (entry->object.sub_map == NULL) {
840 			goto bad;
841 		}
842 	} else {
843 		if (entry->object.uvm_obj != NULL ||
844 		    entry->object.sub_map != NULL) {
845 			goto bad;
846 		}
847 	}
848 	if (!UVM_ET_ISOBJ(entry)) {
849 		if (entry->offset != 0) {
850 			goto bad;
851 		}
852 	}
853 
854 	return;
855 
856 bad:
857 	panic("%s: bad entry %p, line %d", __func__, entry, line);
858 }
859 #endif /* defined(DEBUG) */
860 
861 /*
862  * uvm_map_entry_unwire: unwire a map entry
863  *
864  * => map should be locked by caller
865  */
866 
867 static inline void
868 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
869 {
870 
871 	entry->wired_count = 0;
872 	uvm_fault_unwire_locked(map, entry->start, entry->end);
873 }
874 
875 
876 /*
877  * wrapper for calling amap_ref()
878  */
879 static inline void
880 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
881 {
882 
883 	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
884 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
885 }
886 
887 
888 /*
889  * wrapper for calling amap_unref()
890  */
891 static inline void
892 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
893 {
894 
895 	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
896 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
897 }
898 
899 
900 /*
901  * uvm_map_init: init mapping system at boot time.
902  */
903 
904 void
905 uvm_map_init(void)
906 {
907 #if defined(UVMHIST)
908 	static struct kern_history_ent pdhistbuf[UVMHIST_PDHIST_SIZE];
909 #endif
910 
911 	/*
912 	 * first, init logging system.
913 	 */
914 
915 	UVMHIST_FUNC(__func__);
916 	UVMHIST_LINK_STATIC(maphist);
917 	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
918 	UVMHIST_CALLED(maphist);
919 	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
920 
921 	/*
922 	 * initialize the global lock for kernel map entry.
923 	 */
924 
925 	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
926 }
927 
928 /*
929  * uvm_map_init_caches: init mapping system caches.
930  */
931 void
932 uvm_map_init_caches(void)
933 {
934 	/*
935 	 * initialize caches.
936 	 */
937 
938 	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
939 	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
940 	    NULL, NULL);
941 	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
942 	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
943 }
944 
945 /*
946  * clippers
947  */
948 
949 /*
950  * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
951  */
952 
953 static void
954 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
955     vaddr_t splitat)
956 {
957 	vaddr_t adj;
958 
959 	KASSERT(entry1->start < splitat);
960 	KASSERT(splitat < entry1->end);
961 
962 	adj = splitat - entry1->start;
963 	entry1->end = entry2->start = splitat;
964 
965 	if (entry1->aref.ar_amap) {
966 		amap_splitref(&entry1->aref, &entry2->aref, adj);
967 	}
968 	if (UVM_ET_ISSUBMAP(entry1)) {
969 		/* ... unlikely to happen, but play it safe */
970 		 uvm_map_reference(entry1->object.sub_map);
971 	} else if (UVM_ET_ISOBJ(entry1)) {
972 		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
973 		entry2->offset += adj;
974 		if (entry1->object.uvm_obj->pgops &&
975 		    entry1->object.uvm_obj->pgops->pgo_reference)
976 			entry1->object.uvm_obj->pgops->pgo_reference(
977 			    entry1->object.uvm_obj);
978 	}
979 }
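
/*
 * An example (illustrative): splitting an object-backed entry
 * covering [0x1000, 0x5000) at 0x3000 leaves entry1 as
 * [0x1000, 0x3000) and entry2 as [0x3000, 0x5000), with
 * entry2->offset advanced by adj == 0x2000 so that both halves
 * still map the same pages of the backing object, and an extra
 * object reference taken (if the pager provides pgo_reference)
 * on behalf of the new entry.
 */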
980 
981 /*
982  * uvm_map_clip_start: ensure that the entry begins at or after
983  *	the starting address; if it doesn't, we split the entry.
984  *
985  * => caller should use UVM_MAP_CLIP_START macro rather than calling
986  *    this directly
987  * => map must be locked by caller
988  */
989 
990 void
991 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
992     vaddr_t start)
993 {
994 	struct vm_map_entry *new_entry;
995 
996 	/* uvm_map_simplify_entry(map, entry); */ /* XXX */
997 
998 	uvm_map_check(map, "clip_start entry");
999 	uvm_mapent_check(entry);
1000 
1001 	/*
1002 	 * Split off the front portion.  note that we must insert the new
1003 	 * entry BEFORE this one, so that this entry has the specified
1004 	 * starting address.
1005 	 */
1006 	new_entry = uvm_mapent_alloc(map, 0);
1007 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1008 	uvm_mapent_splitadj(new_entry, entry, start);
1009 	uvm_map_entry_link(map, entry->prev, new_entry);
1010 
1011 	uvm_map_check(map, "clip_start leave");
1012 }
1013 
1014 /*
1015  * uvm_map_clip_end: ensure that the entry ends at or before
1016  *	the ending address; if it doesn't, we split the entry
1017  *
1018  * => caller should use UVM_MAP_CLIP_END macro rather than calling
1019  *    this directly
1020  * => map must be locked by caller
1021  */
1022 
1023 void
1024 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1025 {
1026 	struct vm_map_entry *new_entry;
1027 
1028 	uvm_map_check(map, "clip_end entry");
1029 	uvm_mapent_check(entry);
1030 
1031 	/*
1032 	 *	Create a new entry and insert it
1033 	 *	AFTER the specified entry
1034 	 */
1035 	new_entry = uvm_mapent_alloc(map, 0);
1036 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1037 	uvm_mapent_splitadj(entry, new_entry, end);
1038 	uvm_map_entry_link(map, entry, new_entry);
1039 
1040 	uvm_map_check(map, "clip_end leave");
1041 }
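
/*
 * A sketch of how callers use the two clippers together (via the
 * macros named above) to isolate a sub-range of an entry before
 * changing it:
 *
 *	UVM_MAP_CLIP_START(map, entry, start);
 *	UVM_MAP_CLIP_END(map, entry, end);
 *	now [entry->start, entry->end) lies within [start, end) and
 *	can be modified or removed without affecting the rest of the
 *	original entry
 */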
1042 
1043 /*
1044  *   M A P   -   m a i n   e n t r y   p o i n t
1045  */
1046 /*
1047  * uvm_map: establish a valid mapping in a map
1048  *
1049  * => assume startp is page aligned.
1050  * => assume size is a multiple of PAGE_SIZE.
1051  * => assume sys_mmap provides enough of a "hint" to have us skip
1052  *	over text/data/bss area.
1053  * => map must be unlocked (we will lock it)
1054  * => <uobj,uoffset> value meanings (4 cases):
1055  *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
1056  *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
1057  *	 [3] <uobj,uoffset>		== normal mapping
1058  *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
1059  *
1060  *    case [4] is for kernel mappings where we don't know the offset until
1061  *    we've found a virtual address.   note that kernel object offsets are
1062  *    always relative to vm_map_min(kernel_map).
1063  *
1064  * => if `align' is non-zero, we align the virtual address to the specified
1065  *	alignment.
1066  *	this is provided as a mechanism for large pages.
1067  *
1068  * => XXXCDC: need way to map in external amap?
1069  */
1070 
1071 int
1072 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1073     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1074 {
1075 	struct uvm_map_args args;
1076 	struct vm_map_entry *new_entry;
1077 	int error;
1078 
1079 	KASSERT((size & PAGE_MASK) == 0);
1080 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1081 
1082 	/*
1083 	 * for pager_map, allocate the new entry first to avoid sleeping
1084 	 * for memory while we have the map locked.
1085 	 */
1086 
1087 	new_entry = NULL;
1088 	if (map == pager_map) {
1089 		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1090 		if (__predict_false(new_entry == NULL))
1091 			return ENOMEM;
1092 	}
1093 	if (map == pager_map)
1094 		flags |= UVM_FLAG_NOMERGE;
1095 
1096 	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1097 	    flags, &args);
1098 	if (!error) {
1099 		error = uvm_map_enter(map, &args, new_entry);
1100 		*startp = args.uma_start;
1101 	} else if (new_entry) {
1102 		uvm_mapent_free(new_entry);
1103 	}
1104 
1105 #if defined(DEBUG)
1106 	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
1107 		uvm_km_check_empty(map, *startp, *startp + size);
1108 	}
1109 #endif /* defined(DEBUG) */
1110 
1111 	return error;
1112 }
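
/*
 * A sketch of a typical anonymous-memory call (illustrative only;
 * compare the sys_mmap() and uvm_km code paths):
 *
 *	vaddr_t va = hint;
 *	error = uvm_map(&p->p_vmspace->vm_map, &va, size,
 *	    NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_NORMAL, 0));
 *
 * This is case [2] above: no uobj and UVM_UNKNOWN_OFFSET, so no
 * PMAP_PREFER hint applies and the resulting mapping is anonymous.
 */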
1113 
1114 /*
1115  * uvm_map_prepare:
1116  *
1117  * called with map unlocked.
1118  * on success, returns the map locked.
1119  */
1120 
1121 int
1122 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1123     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1124     struct uvm_map_args *args)
1125 {
1126 	struct vm_map_entry *prev_entry;
1127 	vm_prot_t prot = UVM_PROTECTION(flags);
1128 	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1129 
1130 	UVMHIST_FUNC(__func__);
1131 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
1132 	    (uintptr_t)map, start, size, flags);
1133 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
1134 	    uoffset,0,0);
1135 
1136 	/*
1137 	 * detect a popular device driver bug.
1138 	 */
1139 
1140 	KASSERT(doing_shutdown || curlwp != NULL);
1141 
1142 	/*
1143 	 * zero-sized mapping doesn't make any sense.
1144 	 */
1145 	KASSERT(size > 0);
1146 
1147 	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1148 
1149 	uvm_map_check(map, "map entry");
1150 
1151 	/*
1152 	 * check sanity of protection code
1153 	 */
1154 
1155 	if ((prot & maxprot) != prot) {
1156 		UVMHIST_LOG(maphist, "<- prot. failure:  prot=%#jx, max=%#jx",
1157 		prot, maxprot,0,0);
1158 		return EACCES;
1159 	}
1160 
1161 	/*
1162 	 * figure out where to put new VM range
1163 	 */
1164 retry:
1165 	if (vm_map_lock_try(map) == false) {
1166 		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
1167 			return EAGAIN;
1168 		}
1169 		vm_map_lock(map); /* could sleep here */
1170 	}
1171 	if (flags & UVM_FLAG_UNMAP) {
1172 		KASSERT(flags & UVM_FLAG_FIXED);
1173 		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);
1174 
1175 		/*
1176 		 * Set prev_entry to what it will need to be after any existing
1177 		 * entries are removed later in uvm_map_enter().
1178 		 */
1179 
1180 		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
1181 			if (start == prev_entry->start)
1182 				prev_entry = prev_entry->prev;
1183 			else
1184 				UVM_MAP_CLIP_END(map, prev_entry, start);
1185 			SAVE_HINT(map, map->hint, prev_entry);
1186 		}
1187 	} else {
1188 		prev_entry = uvm_map_findspace(map, start, size, &start,
1189 		    uobj, uoffset, align, flags);
1190 	}
1191 	if (prev_entry == NULL) {
1192 		unsigned int timestamp;
1193 
1194 		timestamp = map->timestamp;
1195 		UVMHIST_LOG(maphist,"waiting va timestamp=%#jx",
1196 			    timestamp,0,0,0);
1197 		map->flags |= VM_MAP_WANTVA;
1198 		vm_map_unlock(map);
1199 
1200 		/*
1201 		 * try to reclaim kva and wait until someone does unmap.
1202 		 * fragile locking here, so we awaken every second to
1203 		 * recheck the condition.
1204 		 */
1205 
1206 		mutex_enter(&map->misc_lock);
1207 		while ((map->flags & VM_MAP_WANTVA) != 0 &&
1208 		   map->timestamp == timestamp) {
1209 			if ((flags & UVM_FLAG_WAITVA) == 0) {
1210 				mutex_exit(&map->misc_lock);
1211 				UVMHIST_LOG(maphist,
1212 				    "<- uvm_map_findspace failed!", 0,0,0,0);
1213 				return ENOMEM;
1214 			} else {
1215 				cv_timedwait(&map->cv, &map->misc_lock, hz);
1216 			}
1217 		}
1218 		mutex_exit(&map->misc_lock);
1219 		goto retry;
1220 	}
1221 
1222 #ifdef PMAP_GROWKERNEL
1223 	/*
1224 	 * If the kernel pmap can't map the requested space,
1225 	 * then allocate more resources for it.
1226 	 */
1227 	if (map == kernel_map && uvm_maxkaddr < (start + size))
1228 		uvm_maxkaddr = pmap_growkernel(start + size);
1229 #endif
1230 
1231 	UVMMAP_EVCNT_INCR(map_call);
1232 
1233 	/*
1234 	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1235 	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
1236 	 * either case we want to zero it  before storing it in the map entry
1237 	 * (because it looks strange and confusing when debugging...)
1238 	 *
1239 	 * if uobj is not null
1240 	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1241 	 *      and we do not need to change uoffset.
1242 	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1243 	 *      now (based on the starting address of the map).   this case is
1244 	 *      for kernel object mappings where we don't know the offset until
1245 	 *      the virtual address is found (with uvm_map_findspace).   the
1246 	 *      offset is the distance we are from the start of the map.
1247 	 */
1248 
1249 	if (uobj == NULL) {
1250 		uoffset = 0;
1251 	} else {
1252 		if (uoffset == UVM_UNKNOWN_OFFSET) {
1253 			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1254 			uoffset = start - vm_map_min(kernel_map);
1255 		}
1256 	}
1257 
1258 	args->uma_flags = flags;
1259 	args->uma_prev = prev_entry;
1260 	args->uma_start = start;
1261 	args->uma_size = size;
1262 	args->uma_uobj = uobj;
1263 	args->uma_uoffset = uoffset;
1264 
1265 	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
1266 	return 0;
1267 }
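
/*
 * Note: uvm_map_prepare() and uvm_map_enter() are split so that a
 * caller can do the space lookup and the insertion as separate
 * steps, as uvm_map() above does.  In sketch form:
 *
 *	struct uvm_map_args args;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    align, flags, &args);	returns with map locked
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, NULL);  unlocks map
 */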
1268 
1269 /*
1270  * uvm_map_enter:
1271  *
1272  * called with map locked.
1273  * unlock the map before returning.
1274  */
1275 
1276 int
1277 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1278     struct vm_map_entry *new_entry)
1279 {
1280 	struct vm_map_entry *prev_entry = args->uma_prev;
1281 	struct vm_map_entry *dead = NULL, *dead_entries = NULL;
1282 
1283 	const uvm_flag_t flags = args->uma_flags;
1284 	const vm_prot_t prot = UVM_PROTECTION(flags);
1285 	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1286 	const vm_inherit_t inherit = UVM_INHERIT(flags);
1287 	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1288 	    AMAP_EXTEND_NOWAIT : 0;
1289 	const int advice = UVM_ADVICE(flags);
1290 
1291 	vaddr_t start = args->uma_start;
1292 	vsize_t size = args->uma_size;
1293 	struct uvm_object *uobj = args->uma_uobj;
1294 	voff_t uoffset = args->uma_uoffset;
1295 
1296 	const int kmap = (vm_map_pmap(map) == pmap_kernel());
1297 	int merged = 0;
1298 	int error;
1299 	int newetype;
1300 
1301 	UVMHIST_FUNC(__func__);
1302 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
1303 	    (uintptr_t)map, start, size, flags);
1304 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
1305 	    uoffset,0,0);
1306 
1307 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1308 	KASSERT(vm_map_locked_p(map));
1309 	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP)) !=
1310 		(UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP));
1311 
1312 	if (uobj)
1313 		newetype = UVM_ET_OBJ;
1314 	else
1315 		newetype = 0;
1316 
1317 	if (flags & UVM_FLAG_COPYONW) {
1318 		newetype |= UVM_ET_COPYONWRITE;
1319 		if ((flags & UVM_FLAG_OVERLAY) == 0)
1320 			newetype |= UVM_ET_NEEDSCOPY;
1321 	}
1322 
1323 	/*
1324 	 * For mappings with unmap, remove any old entries now.  Adding the new
1325 	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
1326 	 * is set, and we do not support nowait and unmap together.
1327 	 */
1328 
1329 	if (flags & UVM_FLAG_UNMAP) {
1330 		KASSERT(flags & UVM_FLAG_FIXED);
1331 		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
1332 #ifdef DEBUG
1333 		struct vm_map_entry *tmp_entry __diagused;
1334 		bool rv __diagused;
1335 
1336 		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
1337 		KASSERT(!rv);
1338 		KASSERTMSG(prev_entry == tmp_entry,
1339 			   "args %p prev_entry %p tmp_entry %p",
1340 			   args, prev_entry, tmp_entry);
1341 #endif
1342 		SAVE_HINT(map, map->hint, prev_entry);
1343 	}
1344 
1345 	/*
1346 	 * try and insert in map by extending previous entry, if possible.
1347 	 * XXX: we don't try and pull back the next entry.   might be useful
1348 	 * for a stack, but we are currently allocating our stack in advance.
1349 	 */
1350 
1351 	if (flags & UVM_FLAG_NOMERGE)
1352 		goto nomerge;
1353 
1354 	if (prev_entry->end == start &&
1355 	    prev_entry != &map->header &&
1356 	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
1357 	    prot, maxprot, inherit, advice, 0)) {
1358 
1359 		if (uobj && prev_entry->offset +
1360 		    (prev_entry->end - prev_entry->start) != uoffset)
1361 			goto forwardmerge;
1362 
1363 		/*
1364 		 * can't extend a shared amap.  note: no need to lock amap to
1365 		 * look at refs since we don't care about its exact value.
1366 		 * if it is one (i.e. we have the only reference) it will stay there.
1367 		 */
1368 
1369 		if (prev_entry->aref.ar_amap &&
1370 		    amap_refs(prev_entry->aref.ar_amap) != 1) {
1371 			goto forwardmerge;
1372 		}
1373 
1374 		if (prev_entry->aref.ar_amap) {
1375 			error = amap_extend(prev_entry, size,
1376 			    amapwaitflag | AMAP_EXTEND_FORWARDS);
1377 			if (error)
1378 				goto nomerge;
1379 		}
1380 
1381 		if (kmap) {
1382 			UVMMAP_EVCNT_INCR(kbackmerge);
1383 		} else {
1384 			UVMMAP_EVCNT_INCR(ubackmerge);
1385 		}
1386 		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);
1387 
1388 		/*
1389 		 * drop our reference to uobj since we are extending a reference
1390 		 * that we already have (the ref count can not drop to zero).
1391 		 */
1392 
1393 		if (uobj && uobj->pgops->pgo_detach)
1394 			uobj->pgops->pgo_detach(uobj);
1395 
1396 		/*
1397 		 * Now that we've merged the entries, note that we've grown
1398 		 * and our gap has shrunk.  Then fix the tree.
1399 		 */
1400 		prev_entry->end += size;
1401 		prev_entry->gap -= size;
1402 		uvm_rb_fixup(map, prev_entry);
1403 
1404 		uvm_map_check(map, "map backmerged");
1405 
1406 		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1407 		merged++;
1408 	}
1409 
1410 forwardmerge:
1411 	if (prev_entry->next->start == (start + size) &&
1412 	    prev_entry->next != &map->header &&
1413 	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
1414 	    prot, maxprot, inherit, advice, 0)) {
1415 
1416 		if (uobj && prev_entry->next->offset != uoffset + size)
1417 			goto nomerge;
1418 
1419 		/*
1420 		 * can't extend a shared amap.  note: no need to lock amap to
1421 		 * look at refs since we don't care about its exact value.
1422 		 * if it is one (i.e. we have the only reference) it will stay there.
1423 		 *
1424 		 * note that we also can't merge two amaps, so if we
1425 		 * merged with the previous entry which has an amap,
1426 		 * and the next entry also has an amap, we give up.
1427 		 *
1428 		 * Interesting cases:
1429 		 * amap, new, amap -> give up second merge (single fwd extend)
1430 		 * amap, new, none -> double forward extend (extend again here)
1431 		 * none, new, amap -> double backward extend (done here)
1432 		 * uobj, new, amap -> single backward extend (done here)
1433 		 *
1434 		 * XXX should we attempt to deal with someone refilling
1435 		 * the deallocated region between two entries that are
1436 		 * backed by the same amap (i.e., arefs is 2, "prev" and
1437 		 * "next" refer to it, and adding this allocation will
1438 		 * close the hole, thus restoring arefs to 1 and
1439 		 * deallocating the "next" vm_map_entry)?  -- @@@
1440 		 */
1441 
1442 		if (prev_entry->next->aref.ar_amap &&
1443 		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1444 		     (merged && prev_entry->aref.ar_amap))) {
1445 			goto nomerge;
1446 		}
1447 
1448 		if (merged) {
1449 			/*
1450 			 * Try to extend the amap of the previous entry to
1451 			 * cover the next entry as well.  If it doesn't work
1452 			 * just skip on, don't actually give up, since we've
1453 			 * already completed the back merge.
1454 			 */
1455 			if (prev_entry->aref.ar_amap) {
1456 				if (amap_extend(prev_entry,
1457 				    prev_entry->next->end -
1458 				    prev_entry->next->start,
1459 				    amapwaitflag | AMAP_EXTEND_FORWARDS))
1460 					goto nomerge;
1461 			}
1462 
1463 			/*
1464 			 * Try to extend the amap of the *next* entry
1465 			 * back to cover the new allocation *and* the
1466 			 * previous entry as well (the previous merge
1467 			 * didn't have an amap already otherwise we
1468 			 * wouldn't be checking here for an amap).  If
1469 			 * it doesn't work just skip on, again, don't
1470 			 * actually give up, since we've already
1471 			 * completed the back merge.
1472 			 */
1473 			else if (prev_entry->next->aref.ar_amap) {
1474 				if (amap_extend(prev_entry->next,
1475 				    prev_entry->end -
1476 				    prev_entry->start,
1477 				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
1478 					goto nomerge;
1479 			}
1480 		} else {
1481 			/*
1482 			 * Pull the next entry's amap backwards to cover this
1483 			 * new allocation.
1484 			 */
1485 			if (prev_entry->next->aref.ar_amap) {
1486 				error = amap_extend(prev_entry->next, size,
1487 				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
1488 				if (error)
1489 					goto nomerge;
1490 			}
1491 		}
1492 
1493 		if (merged) {
1494 			if (kmap) {
1495 				UVMMAP_EVCNT_DECR(kbackmerge);
1496 				UVMMAP_EVCNT_INCR(kbimerge);
1497 			} else {
1498 				UVMMAP_EVCNT_DECR(ubackmerge);
1499 				UVMMAP_EVCNT_INCR(ubimerge);
1500 			}
1501 		} else {
1502 			if (kmap) {
1503 				UVMMAP_EVCNT_INCR(kforwmerge);
1504 			} else {
1505 				UVMMAP_EVCNT_INCR(uforwmerge);
1506 			}
1507 		}
1508 		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);
1509 
1510 		/*
1511 		 * drop our reference to uobj since we are extending a reference
1512 		 * that we already have (the ref count can not drop to zero).
1513 		 */
1514 		if (uobj && uobj->pgops->pgo_detach)
1515 			uobj->pgops->pgo_detach(uobj);
1516 
1517 		if (merged) {
1518 			dead = prev_entry->next;
1519 			prev_entry->end = dead->end;
1520 			uvm_map_entry_unlink(map, dead);
1521 			if (dead->aref.ar_amap != NULL) {
1522 				prev_entry->aref = dead->aref;
1523 				dead->aref.ar_amap = NULL;
1524 			}
1525 		} else {
1526 			prev_entry->next->start -= size;
1527 			if (prev_entry != &map->header) {
1528 				prev_entry->gap -= size;
1529 				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
1530 				uvm_rb_fixup(map, prev_entry);
1531 			}
1532 			if (uobj)
1533 				prev_entry->next->offset = uoffset;
1534 		}
1535 
1536 		uvm_map_check(map, "map forwardmerged");
1537 
1538 		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1539 		merged++;
1540 	}
1541 
1542 nomerge:
1543 	if (!merged) {
1544 		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
1545 		if (kmap) {
1546 			UVMMAP_EVCNT_INCR(knomerge);
1547 		} else {
1548 			UVMMAP_EVCNT_INCR(unomerge);
1549 		}
1550 
1551 		/*
1552 		 * allocate new entry and link it in.
1553 		 */
1554 
1555 		if (new_entry == NULL) {
1556 			new_entry = uvm_mapent_alloc(map,
1557 				(flags & UVM_FLAG_NOWAIT));
1558 			if (__predict_false(new_entry == NULL)) {
1559 				error = ENOMEM;
1560 				goto done;
1561 			}
1562 		}
1563 		new_entry->start = start;
1564 		new_entry->end = new_entry->start + size;
1565 		new_entry->object.uvm_obj = uobj;
1566 		new_entry->offset = uoffset;
1567 
1568 		new_entry->etype = newetype;
1569 
1570 		if (flags & UVM_FLAG_NOMERGE) {
1571 			new_entry->flags |= UVM_MAP_NOMERGE;
1572 		}
1573 
1574 		new_entry->protection = prot;
1575 		new_entry->max_protection = maxprot;
1576 		new_entry->inheritance = inherit;
1577 		new_entry->wired_count = 0;
1578 		new_entry->advice = advice;
1579 		if (flags & UVM_FLAG_OVERLAY) {
1580 
1581 			/*
1582 			 * to_add: for BSS we overallocate a little since we
1583 			 * are likely to extend
1584 			 */
1585 
1586 			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1587 				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1588 			struct vm_amap *amap = amap_alloc(size, to_add,
1589 			    (flags & UVM_FLAG_NOWAIT));
1590 			if (__predict_false(amap == NULL)) {
1591 				error = ENOMEM;
1592 				goto done;
1593 			}
1594 			new_entry->aref.ar_pageoff = 0;
1595 			new_entry->aref.ar_amap = amap;
1596 		} else {
1597 			new_entry->aref.ar_pageoff = 0;
1598 			new_entry->aref.ar_amap = NULL;
1599 		}
1600 		uvm_map_entry_link(map, prev_entry, new_entry);
1601 
1602 		/*
1603 		 * Update the free space hint
1604 		 */
1605 
1606 		if ((map->first_free == prev_entry) &&
1607 		    (prev_entry->end >= new_entry->start))
1608 			map->first_free = new_entry;
1609 
1610 		new_entry = NULL;
1611 	}
1612 
1613 	map->size += size;
1614 
1615 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1616 
1617 	error = 0;
1618 
1619 done:
1620 	vm_map_unlock(map);
1621 
1622 	if (new_entry) {
1623 		uvm_mapent_free(new_entry);
1624 	}
1625 	if (dead) {
1626 		KDASSERT(merged);
1627 		uvm_mapent_free(dead);
1628 	}
1629 	if (dead_entries)
1630 		uvm_unmap_detach(dead_entries, 0);
1631 
1632 	return error;
1633 }
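
/*
 * A diagram of the merge cases handled above (illustrative):
 *
 *	back merge:    [ prev ][ new ]         -> [ prev        ]
 *	forward merge: [ new ][ next ]         -> [        next ]
 *	bimerge:       [ prev ][ new ][ next ] -> [ prev        ]
 *
 * In the bimerge case the back merge is performed first; the forward
 * pass then absorbs the old next entry, which is unlinked and freed
 * ("dead" above).
 */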
1634 
1635 /*
1636  * uvm_map_lookup_entry_bytree: lookup an entry in tree
1637  */
1638 
1639 static inline bool
1640 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1641     struct vm_map_entry **entry	/* OUT */)
1642 {
1643 	struct vm_map_entry *prev = &map->header;
1644 	struct vm_map_entry *cur = ROOT_ENTRY(map);
1645 
1646 	while (cur) {
1647 		UVMMAP_EVCNT_INCR(mlk_treeloop);
1648 		if (address >= cur->start) {
1649 			if (address < cur->end) {
1650 				*entry = cur;
1651 				return true;
1652 			}
1653 			prev = cur;
1654 			cur = RIGHT_ENTRY(cur);
1655 		} else
1656 			cur = LEFT_ENTRY(cur);
1657 	}
1658 	*entry = prev;
1659 	return false;
1660 }
1661 
1662 /*
1663  * uvm_map_lookup_entry: find map entry at or before an address
1664  *
1665  * => map must at least be read-locked by caller
1666  * => entry is returned in "entry"
1667  * => return value is true if address is in the returned entry
1668  */
1669 
1670 bool
1671 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1672     struct vm_map_entry **entry	/* OUT */)
1673 {
1674 	struct vm_map_entry *cur;
1675 	UVMHIST_FUNC(__func__);
1676 	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
1677 	    (uintptr_t)map, address, (uintptr_t)entry, 0);
1678 
1679 	/*
1680 	 * make a quick check to see if we are already looking at
1681 	 * the entry we want (which is usually the case).  note also
1682 	 * that we don't need to save the hint here...  it is the
1683 	 * same hint (unless we are at the header, in which case the
1684 	 * hint didn't buy us anything anyway).
1685 	 */
1686 
1687 	cur = map->hint;
1688 	UVMMAP_EVCNT_INCR(mlk_call);
1689 	if (cur != &map->header &&
1690 	    address >= cur->start && cur->end > address) {
1691 		UVMMAP_EVCNT_INCR(mlk_hint);
1692 		*entry = cur;
1693 		UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
1694 		    (uintptr_t)cur, 0, 0, 0);
1695 		uvm_mapent_check(*entry);
1696 		return (true);
1697 	}
1698 	uvm_map_check(map, __func__);
1699 
1700 	/*
1701 	 * lookup in the tree.
1702 	 */
1703 
1704 	UVMMAP_EVCNT_INCR(mlk_tree);
1705 	if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
1706 		SAVE_HINT(map, map->hint, *entry);
1707 		UVMHIST_LOG(maphist,"<- search got it (%#jx)",
1708 		    (uintptr_t)cur, 0, 0, 0);
1709 		KDASSERT((*entry)->start <= address);
1710 		KDASSERT(address < (*entry)->end);
1711 		uvm_mapent_check(*entry);
1712 		return (true);
1713 	}
1714 
1715 	SAVE_HINT(map, map->hint, *entry);
1716 	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1717 	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1718 	KDASSERT((*entry)->next == &map->header ||
1719 	    address < (*entry)->next->start);
1720 	return (false);
1721 }
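
/*
 * A sketch of a typical lookup (illustrative only):
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		va is mapped: entry->start <= va < entry->end
 *	} else {
 *		va is in a hole: entry is the entry just before it,
 *		or &map->header if va precedes the first entry
 *	}
 *	vm_map_unlock_read(map);
 */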
1722 
1723 /*
1724  * See if the range [start, start + length) fits in the gap between
1725  * entry->end and entry->next->start.  Returns 1 if it fits, 0 if it
1726  * doesn't fit, and -1 if the address wraps around.
1727  */
1728 static int
1729 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1730     vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
1731 {
1732 	vaddr_t end;
1733 
1734 #ifdef PMAP_PREFER
1735 	/*
1736 	 * push start address forward as needed to avoid VAC alias problems.
1737 	 * we only do this if a valid offset is specified.
1738 	 */
1739 
1740 	if (uoffset != UVM_UNKNOWN_OFFSET)
1741 		PMAP_PREFER(uoffset, start, length, topdown);
1742 #endif
1743 	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
1744 		KASSERT(align < uvmexp.ncolors);
1745 		if (uvmexp.ncolors > 1) {
1746 			const u_int colormask = uvmexp.colormask;
1747 			const u_int colorsize = colormask + 1;
1748 			vaddr_t hint = atop(*start);
1749 			const u_int color = hint & colormask;
1750 			if (color != align) {
1751 				hint -= color;	/* adjust to color boundary */
1752 				KASSERT((hint & colormask) == 0);
1753 				if (topdown) {
1754 					if (align > color)
1755 						hint -= colorsize;
1756 				} else {
1757 					if (align < color)
1758 						hint += colorsize;
1759 				}
1760 				*start = ptoa(hint + align); /* adjust to color */
1761 			}
1762 		}
1763 	} else {
1764 		KASSERT(powerof2(align));
1765 		uvm_map_align_va(start, align, topdown);
1766 		/*
1767 		 * XXX Should we PMAP_PREFER() here again?
1768 		 * eh...i think we're okay
1769 		 */
1770 	}
1771 
1772 	/*
1773 	 * Find the end of the proposed new region.  Be sure we didn't
1774 	 * wrap around the address; if so, we lose.  Otherwise, if the
1775 	 * proposed new region fits before the next entry, we win.
1776 	 */
1777 
1778 	end = *start + length;
1779 	if (end < *start)
1780 		return (-1);
1781 
1782 	if (entry->next->start >= end && *start >= entry->end)
1783 		return (1);
1784 
1785 	return (0);
1786 }
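
/*
 * A worked example of the UVM_FLAG_COLORMATCH branch above
 * (illustrative): with uvmexp.ncolors == 4 (colormask == 3), a
 * wanted color ("align") of 2, and *start at page 0x105:
 *
 *	color = 0x105 & 3 == 1, which differs from 2, so adjust:
 *	hint -= color, giving page 0x104, a color boundary;
 *	bottom-up: align > color, so no colorsize step is taken;
 *	*start = ptoa(0x104 + 2), i.e. page 0x106, which has color 2.
 *
 * In the topdown case the same inputs instead yield page 0x102: the
 * nearest page of the wanted color at or below the original hint.
 */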
1787 
1788 /*
1789  * uvm_map_findspace: find "length" sized space in "map".
1790  *
1791  * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1792  *	set in "flags" (in which case we insist on using "hint").
1793  * => "result" is VA returned
1794  * => uobj/uoffset are to be used to handle VAC alignment, if required
1795  * => if "align" is non-zero, we attempt to align to that value.
1796  * => caller must at least have read-locked map
1797  * => returns NULL on failure, or a pointer to the prev. map entry on success
1798  * => note this is a cross between the old vm_map_findspace and vm_map_find
1799  */
1800 
1801 struct vm_map_entry *
1802 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1803     vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1804     vsize_t align, int flags)
1805 {
1806 	struct vm_map_entry *entry;
1807 	struct vm_map_entry *child, *prev, *tmp;
1808 	vaddr_t orig_hint __diagused;
1809 	const int topdown = map->flags & VM_MAP_TOPDOWN;
1810 	UVMHIST_FUNC(__func__);
1811 	UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx)",
1812 	    (uintptr_t)map, hint, length, flags);
1813 
1814 	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align));
1815 	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
1816 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1817 
1818 	uvm_map_check(map, "map_findspace entry");
1819 
1820 	/*
1821 	 * remember the original hint.  if we are aligning, then we
1822 	 * may have to try again with no alignment constraint if
1823 	 * we fail the first time.
1824 	 */
1825 
1826 	orig_hint = hint;
1827 	if (hint < vm_map_min(map)) {	/* check ranges ... */
1828 		if (flags & UVM_FLAG_FIXED) {
1829 			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1830 			return (NULL);
1831 		}
1832 		hint = vm_map_min(map);
1833 	}
1834 	if (hint > vm_map_max(map)) {
1835 		UVMHIST_LOG(maphist,"<- VA %#jx > range [%#jx->%#jx]",
1836 		    hint, vm_map_min(map), vm_map_max(map), 0);
1837 		return (NULL);
1838 	}
1839 
1840 	/*
1841 	 * hint may not be aligned properly; we need to round it up or
1842 	 * down before proceeding further.
1843 	 */
1844 	if ((flags & UVM_FLAG_COLORMATCH) == 0)
1845 		uvm_map_align_va(&hint, align, topdown);
1846 
1847 	/*
1848 	 * Look for the first possible address; if there's already
1849 	 * something at this address, we have to start after it.
1850 	 */
1851 
1852 	/*
1853 	 * @@@: there are four, no, eight cases to consider.
1854 	 *
1855 	 * 0: found,     fixed,     bottom up -> fail
1856 	 * 1: found,     fixed,     top down  -> fail
1857 	 * 2: found,     not fixed, bottom up -> start after entry->end,
1858 	 *                                       loop up
1859 	 * 3: found,     not fixed, top down  -> start before entry->start,
1860 	 *                                       loop down
1861 	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
1862 	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
1863 	 * 6: not found, not fixed, bottom up -> check entry->next->start,
1864 	 *                                       loop up
1865 	 * 7: not found, not fixed, top down  -> check entry->next->start,
1866 	 *                                       loop down
1867 	 *
1868 	 * as you can see, it reduces to roughly five cases; note that
1869 	 * adding top down mapping only adds one unique case (without
1870 	 * it, there would be four cases).
1871 	 */
1872 
1873 	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1874 		entry = map->first_free;
1875 	} else {
1876 		if (uvm_map_lookup_entry(map, hint, &entry)) {
1877 			/* "hint" address already in use ... */
1878 			if (flags & UVM_FLAG_FIXED) {
1879 				UVMHIST_LOG(maphist, "<- fixed & VA in use",
1880 				    0, 0, 0, 0);
1881 				return (NULL);
1882 			}
1883 			if (topdown)
1884 				/* Start from lower gap. */
1885 				entry = entry->prev;
1886 		} else if (flags & UVM_FLAG_FIXED) {
1887 			if (entry->next->start >= hint + length &&
1888 			    hint + length > hint)
1889 				goto found;
1890 
1891 			/* "hint" address is a gap, but too small */
1892 			UVMHIST_LOG(maphist, "<- fixed mapping failed",
1893 			    0, 0, 0, 0);
1894 			return (NULL); /* only one shot at it ... */
1895 		} else {
1896 			/*
1897 			 * See if given hint fits in this gap.
1898 			 */
1899 			switch (uvm_map_space_avail(&hint, length,
1900 			    uoffset, align, flags, topdown, entry)) {
1901 			case 1:
1902 				goto found;
1903 			case -1:
1904 				goto wraparound;
1905 			}
1906 
1907 			if (topdown) {
1908 				/*
1909 				 * There is still a chance to fit
1910 				 * if hint > entry->end.
1911 				 */
1912 			} else {
1913 				/* Start from higher gap. */
1914 				entry = entry->next;
1915 				if (entry == &map->header)
1916 					goto notfound;
1917 				goto nextgap;
1918 			}
1919 		}
1920 	}
1921 
1922 	/*
1923 	 * Note that the UVM_FLAG_FIXED case is already handled.
1924 	 */
1925 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1926 
1927 	/* Try to find the space in the red-black tree */
1928 
1929 	/* Check slot before any entry */
1930 	hint = topdown ? entry->next->start - length : entry->end;
1931 	switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
1932 	    topdown, entry)) {
1933 	case 1:
1934 		goto found;
1935 	case -1:
1936 		goto wraparound;
1937 	}
1938 
1939 nextgap:
1940 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1941 	/* If there is not enough space in the whole tree, we fail */
1942 	tmp = ROOT_ENTRY(map);
1943 	if (tmp == NULL || tmp->maxgap < length)
1944 		goto notfound;
1945 
1946 	prev = NULL; /* previous candidate */
1947 
1948 	/* Find an entry close to hint that has enough space */
1949 	for (; tmp;) {
1950 		KASSERT(tmp->next->start == tmp->end + tmp->gap);
1951 		if (topdown) {
1952 			if (tmp->next->start < hint + length &&
1953 			    (prev == NULL || tmp->end > prev->end)) {
1954 				if (tmp->gap >= length)
1955 					prev = tmp;
1956 				else if ((child = LEFT_ENTRY(tmp)) != NULL
1957 				    && child->maxgap >= length)
1958 					prev = tmp;
1959 			}
1960 		} else {
1961 			if (tmp->end >= hint &&
1962 			    (prev == NULL || tmp->end < prev->end)) {
1963 				if (tmp->gap >= length)
1964 					prev = tmp;
1965 				else if ((child = RIGHT_ENTRY(tmp)) != NULL
1966 				    && child->maxgap >= length)
1967 					prev = tmp;
1968 			}
1969 		}
1970 		if (tmp->next->start < hint + length)
1971 			child = RIGHT_ENTRY(tmp);
1972 		else if (tmp->end > hint)
1973 			child = LEFT_ENTRY(tmp);
1974 		else {
1975 			if (tmp->gap >= length)
1976 				break;
1977 			if (topdown)
1978 				child = LEFT_ENTRY(tmp);
1979 			else
1980 				child = RIGHT_ENTRY(tmp);
1981 		}
1982 		if (child == NULL || child->maxgap < length)
1983 			break;
1984 		tmp = child;
1985 	}
1986 
1987 	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1988 		/*
1989 		 * Check if the entry that we found satisfies the
1990 		 * space requirement.
1991 		 */
1992 		if (topdown) {
1993 			if (hint > tmp->next->start - length)
1994 				hint = tmp->next->start - length;
1995 		} else {
1996 			if (hint < tmp->end)
1997 				hint = tmp->end;
1998 		}
1999 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
2000 		    flags, topdown, tmp)) {
2001 		case 1:
2002 			entry = tmp;
2003 			goto found;
2004 		case -1:
2005 			goto wraparound;
2006 		}
2007 		if (tmp->gap >= length)
2008 			goto listsearch;
2009 	}
2010 	if (prev == NULL)
2011 		goto notfound;
2012 
2013 	if (topdown) {
2014 		KASSERT(orig_hint >= prev->next->start - length ||
2015 		    prev->next->start - length > prev->next->start);
2016 		hint = prev->next->start - length;
2017 	} else {
2018 		KASSERT(orig_hint <= prev->end);
2019 		hint = prev->end;
2020 	}
2021 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
2022 	    flags, topdown, prev)) {
2023 	case 1:
2024 		entry = prev;
2025 		goto found;
2026 	case -1:
2027 		goto wraparound;
2028 	}
2029 	if (prev->gap >= length)
2030 		goto listsearch;
2031 
2032 	if (topdown)
2033 		tmp = LEFT_ENTRY(prev);
2034 	else
2035 		tmp = RIGHT_ENTRY(prev);
2036 	for (;;) {
2037 		KASSERT(tmp && tmp->maxgap >= length);
2038 		if (topdown)
2039 			child = RIGHT_ENTRY(tmp);
2040 		else
2041 			child = LEFT_ENTRY(tmp);
2042 		if (child && child->maxgap >= length) {
2043 			tmp = child;
2044 			continue;
2045 		}
2046 		if (tmp->gap >= length)
2047 			break;
2048 		if (topdown)
2049 			tmp = LEFT_ENTRY(tmp);
2050 		else
2051 			tmp = RIGHT_ENTRY(tmp);
2052 	}
2053 
2054 	if (topdown) {
2055 		KASSERT(orig_hint >= tmp->next->start - length ||
2056 		    tmp->next->start - length > tmp->next->start);
2057 		hint = tmp->next->start - length;
2058 	} else {
2059 		KASSERT(orig_hint <= tmp->end);
2060 		hint = tmp->end;
2061 	}
2062 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
2063 	    flags, topdown, tmp)) {
2064 	case 1:
2065 		entry = tmp;
2066 		goto found;
2067 	case -1:
2068 		goto wraparound;
2069 	}
2070 
2071 	/*
2072 	 * The tree fails to find an entry because of offset or alignment
2073 	 * restrictions.  Search the list instead.
2074 	 */
2075  listsearch:
2076 	/*
2077 	 * Look through the rest of the map, trying to fit a new region in
2078 	 * the gap between existing regions, or after the very last region.
2079 	 * note: entry->end = base VA of current gap,
2080 	 *	 entry->next->start = VA of end of current gap
2081 	 */
2082 
2083 	for (;;) {
2084 		/* Update hint for current gap. */
2085 		hint = topdown ? entry->next->start - length : entry->end;
2086 
2087 		/* See if it fits. */
2088 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
2089 		    flags, topdown, entry)) {
2090 		case 1:
2091 			goto found;
2092 		case -1:
2093 			goto wraparound;
2094 		}
2095 
2096 		/* Advance to next/previous gap */
2097 		if (topdown) {
2098 			if (entry == &map->header) {
2099 				UVMHIST_LOG(maphist, "<- failed (off start)",
2100 				    0,0,0,0);
2101 				goto notfound;
2102 			}
2103 			entry = entry->prev;
2104 		} else {
2105 			entry = entry->next;
2106 			if (entry == &map->header) {
2107 				UVMHIST_LOG(maphist, "<- failed (off end)",
2108 				    0,0,0,0);
2109 				goto notfound;
2110 			}
2111 		}
2112 	}
2113 
2114  found:
2115 	SAVE_HINT(map, map->hint, entry);
2116 	*result = hint;
2117 	UVMHIST_LOG(maphist,"<- got it!  (result=%#jx)", hint, 0,0,0);
2118 	KASSERTMSG( topdown || hint >= orig_hint, "hint: %jx, orig_hint: %jx",
2119 	    (uintmax_t)hint, (uintmax_t)orig_hint);
2120 	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %jx, orig_hint: %jx",
2121 	    (uintmax_t)hint, (uintmax_t)orig_hint);
2122 	KASSERT(entry->end <= hint);
2123 	KASSERT(hint + length <= entry->next->start);
2124 	return (entry);
2125 
2126  wraparound:
2127 	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2128 
2129 	return (NULL);
2130 
2131  notfound:
2132 	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2133 
2134 	return (NULL);
2135 }
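
/*
 * Illustrative sketch (not part of this file): a minimal caller of
 * uvm_map_findspace().  The map must be at least read-locked, and the
 * returned gap is only stable for as long as that lock is held.
 * "example_find_va" is a hypothetical helper, not a UVM interface.
 */
static __unused int
example_find_va(struct vm_map *map, vsize_t length, vaddr_t *vap)
{
	struct vm_map_entry *prev;
	vaddr_t va;

	vm_map_lock_read(map);
	prev = uvm_map_findspace(map, vm_map_min(map), length, &va,
	    NULL, 0, 0, 0);	/* no uobj, no alignment constraint */
	vm_map_unlock_read(map);

	if (prev == NULL)
		return ENOMEM;
	*vap = va;
	return 0;
}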
2136 
2137 /*
2138  *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
2139  */
2140 
2141 /*
2142  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
2143  *
2144  * => caller must check alignment and size
2145  * => map must be locked by caller
2146  * => we return a list of map entries that we've removed from the map
2147  *    in "entry_list"
2148  */
2149 
2150 void
2151 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2152     struct vm_map_entry **entry_list /* OUT */, int flags)
2153 {
2154 	struct vm_map_entry *entry, *first_entry, *next;
2155 	vaddr_t len;
2156 	UVMHIST_FUNC(__func__);
2157 	UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
2158 	    (uintptr_t)map, start, end, 0);
2159 	VM_MAP_RANGE_CHECK(map, start, end);
2160 
2161 	uvm_map_check(map, "unmap_remove entry");
2162 
2163 	/*
2164 	 * find first entry
2165 	 */
2166 
2167 	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2168 		/* clip and go... */
2169 		entry = first_entry;
2170 		UVM_MAP_CLIP_START(map, entry, start);
2171 		/* critical!  prevents stale hint */
2172 		SAVE_HINT(map, entry, entry->prev);
2173 	} else {
2174 		entry = first_entry->next;
2175 	}
2176 
2177 	/*
2178 	 * save the free space hint
2179 	 */
2180 
2181 	if (map->first_free != &map->header && map->first_free->start >= start)
2182 		map->first_free = entry->prev;
2183 
2184 	/*
2185 	 * note: we now re-use first_entry for a different task.  we remove
2186 	 * a number of map entries from the map and save them in a linked
2187 	 * list headed by "first_entry".  once we remove them from the map
2188 	 * the caller should unlock the map and drop the references to the
2189 	 * backing objects [c.f. uvm_unmap_detach].  the object is to
2190 	 * backing objects [cf. uvm_unmap_detach].  the objective is to
2191 	 * separate unmapping from reference dropping.  why?
2192 	 *   [2] the map need not be locked for reference dropping
2193 	 *   [3] dropping references may trigger pager I/O, and if we hit
2194 	 *       a pager that does synchronous I/O we may have to wait for it.
2195 	 *   [4] we would like all waiting for I/O to occur with maps unlocked
2196 	 *       so that we don't block other threads.
2197 	 */
2198 
2199 	first_entry = NULL;
2200 	*entry_list = NULL;
2201 
2202 	/*
2203 	 * break up the area into map entry sized regions and unmap.  note
2204 	 * that all mappings have to be removed before we can even consider
2205 	 * dropping references to amaps or VM objects (otherwise we could end
2206 	 * up with a mapping to a page on the free list which would be very bad)
2207 	 */
2208 
2209 	while ((entry != &map->header) && (entry->start < end)) {
2210 		KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2211 
2212 		UVM_MAP_CLIP_END(map, entry, end);
2213 		next = entry->next;
2214 		len = entry->end - entry->start;
2215 
2216 		/*
2217 		 * unwire before removing addresses from the pmap; otherwise
2218 		 * unwiring will put the entries back into the pmap (XXX).
2219 		 */
2220 
2221 		if (VM_MAPENT_ISWIRED(entry)) {
2222 			uvm_map_entry_unwire(map, entry);
2223 		}
2224 		if (flags & UVM_FLAG_VAONLY) {
2225 
2226 			/* nothing */
2227 
2228 		} else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2229 
2230 			/*
2231 			 * if the map is non-pageable, any pages mapped there
2232 			 * must be wired and entered with pmap_kenter_pa(),
2233 			 * and we should free any such pages immediately.
2234 			 * this is mostly used for kmem_map.
2235 			 */
2236 			KASSERT(vm_map_pmap(map) == pmap_kernel());
2237 
2238 			uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2239 		} else if (UVM_ET_ISOBJ(entry) &&
2240 			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2241 			panic("%s: kernel object %p %p\n",
2242 			    __func__, map, entry);
2243 		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2244 			/*
2245 			 * remove mappings the standard way.  lock object
2246 			 * and/or amap to ensure vm_page state does not
2247 			 * change while in pmap_remove().
2248 			 */
2249 
2250 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
2251 			uvm_map_lock_entry(entry, RW_WRITER);
2252 #else
2253 			uvm_map_lock_entry(entry, RW_READER);
2254 #endif
2255 			pmap_remove(map->pmap, entry->start, entry->end);
2256 
2257 			/*
2258 			 * note: if map is dying, leave pmap_update() for
2259 			 * later.  if the map is to be reused (exec) then
2260 			 * pmap_update() will be called.  if the map is
2261 			 * being disposed of (exit) then pmap_destroy()
2262 			 * will be called.
2263 			 */
2264 
2265 			if ((map->flags & VM_MAP_DYING) == 0) {
2266 				pmap_update(vm_map_pmap(map));
2267 			} else {
2268 				KASSERT(vm_map_pmap(map) != pmap_kernel());
2269 			}
2270 
2271 			uvm_map_unlock_entry(entry);
2272 		}
2273 
2274 #if defined(UVMDEBUG)
2275 		/*
2276 		 * check if there is any remaining mapping,
2277 		 * which would indicate a bug in the caller.
2278 		 */
2279 
2280 		vaddr_t va;
2281 		for (va = entry->start; va < entry->end;
2282 		    va += PAGE_SIZE) {
2283 			if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2284 				panic("%s: %#"PRIxVADDR" has mapping",
2285 				    __func__, va);
2286 			}
2287 		}
2288 
2289 		if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2290 			uvm_km_check_empty(map, entry->start,
2291 			    entry->end);
2292 		}
2293 #endif /* defined(UVMDEBUG) */
2294 
2295 		/*
2296 		 * remove entry from map and put it on our list of entries
2297 		 * that we've nuked.  then go to next entry.
2298 		 */
2299 
2300 		UVMHIST_LOG(maphist, "  removed map entry %#jx",
2301 		    (uintptr_t)entry, 0, 0, 0);
2302 
2303 		/* critical!  prevents stale hint */
2304 		SAVE_HINT(map, entry, entry->prev);
2305 
2306 		uvm_map_entry_unlink(map, entry);
2307 		KASSERT(map->size >= len);
2308 		map->size -= len;
2309 		entry->prev = NULL;
2310 		entry->next = first_entry;
2311 		first_entry = entry;
2312 		entry = next;
2313 	}
2314 
2315 	uvm_map_check(map, "unmap_remove leave");
2316 
2317 	/*
2318 	 * now we've cleaned up the map and are ready for the caller to drop
2319 	 * references to the mapped objects.
2320 	 */
2321 
2322 	*entry_list = first_entry;
2323 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2324 
2325 	if (map->flags & VM_MAP_WANTVA) {
2326 		mutex_enter(&map->misc_lock);
2327 		map->flags &= ~VM_MAP_WANTVA;
2328 		cv_broadcast(&map->cv);
2329 		mutex_exit(&map->misc_lock);
2330 	}
2331 }
2332 
2333 /*
2334  * uvm_unmap_detach: drop references in a chain of map entries
2335  *
2336  * => we will free the map entries as we traverse the list.
2337  */
2338 
2339 void
2340 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2341 {
2342 	struct vm_map_entry *next_entry;
2343 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2344 
2345 	while (first_entry) {
2346 		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2347 		UVMHIST_LOG(maphist,
2348 		    "  detach %#jx: amap=%#jx, obj=%#jx, submap?=%jd",
2349 		    (uintptr_t)first_entry,
2350 		    (uintptr_t)first_entry->aref.ar_amap,
2351 		    (uintptr_t)first_entry->object.uvm_obj,
2352 		    UVM_ET_ISSUBMAP(first_entry));
2353 
2354 		/*
2355 		 * drop reference to amap, if we've got one
2356 		 */
2357 
2358 		if (first_entry->aref.ar_amap)
2359 			uvm_map_unreference_amap(first_entry, flags);
2360 
2361 		/*
2362 		 * drop reference to our backing object, if we've got one
2363 		 */
2364 
2365 		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2366 		if (UVM_ET_ISOBJ(first_entry) &&
2367 		    first_entry->object.uvm_obj->pgops->pgo_detach) {
2368 			(*first_entry->object.uvm_obj->pgops->pgo_detach)
2369 				(first_entry->object.uvm_obj);
2370 		}
2371 		next_entry = first_entry->next;
2372 		uvm_mapent_free(first_entry);
2373 		first_entry = next_entry;
2374 	}
2375 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2376 }
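
/*
 * Illustrative sketch (not part of this file): the two-phase unmap
 * described above -- remove the entries with the map write-locked,
 * then drop the object/amap references only after the map has been
 * unlocked, so that pager I/O triggered by the reference drops never
 * happens with the map lock held.  "example_unmap_range" is a
 * hypothetical helper patterned on uvm_unmap().
 */
static __unused void
example_unmap_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, 0);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}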
2377 
2378 /*
2379  *   E X T R A C T I O N   F U N C T I O N S
2380  */
2381 
2382 /*
2383  * uvm_map_reserve: reserve space in a vm_map for future use.
2384  *
2385  * => we reserve space in a map by putting a dummy map entry in the
2386  *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2387  * => map should be unlocked (we will write lock it)
2388  * => we return true if we were able to reserve space
2389  * => XXXCDC: should be inline?
2390  */
2391 
2392 int
2393 uvm_map_reserve(struct vm_map *map, vsize_t size,
2394     vaddr_t offset	/* hint for pmap_prefer */,
2395     vsize_t align	/* alignment */,
2396     vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
2397     uvm_flag_t flags	/* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
2398 {
2399 	UVMHIST_FUNC(__func__);
2400 	UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
2401 	    (uintptr_t)map, size, offset, (uintptr_t)raddr);
2402 
2403 	size = round_page(size);
2404 
2405 	/*
2406 	 * reserve some virtual space.
2407 	 */
2408 
2409 	if (uvm_map(map, raddr, size, NULL, offset, align,
2410 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2411 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2412 		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2413 		return (false);
2414 	}
2415 
2416 	UVMHIST_LOG(maphist, "<- done (*raddr=%#jx)", *raddr,0,0,0);
2417 	return (true);
2418 }
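
/*
 * Illustrative sketch (not part of this file): reserving a stretch of
 * VA for later use.  On success *va holds the reserved address and the
 * map contains a blank (obj=NULL, amap=NULL, VM_PROT_NONE) entry
 * covering it.  "example_reserve" is a hypothetical helper.
 */
static __unused int
example_reserve(struct vm_map *map, vsize_t size, vaddr_t *va)
{

	*va = vm_map_min(map);		/* a hint only; not FIXED */
	if (!uvm_map_reserve(map, size, 0, 0, va, 0))
		return ENOMEM;
	return 0;
}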
2419 
2420 /*
2421  * uvm_map_replace: replace a reserved (blank) area of memory with
2422  * real mappings.
2423  *
2424  * => caller must WRITE-LOCK the map
2425  * => we return true if replacement was a success
2426  * => we expect the newents chain to have nnewents entries on it and
2427  *    we expect newents->prev to point to the last entry on the list
2428  * => note newents is allowed to be NULL
2429  */
2430 
2431 static int
2432 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2433     struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2434     struct vm_map_entry **oldentryp)
2435 {
2436 	struct vm_map_entry *oldent, *last;
2437 
2438 	uvm_map_check(map, "map_replace entry");
2439 
2440 	/*
2441 	 * first find the blank map entry at the specified address
2442 	 */
2443 
2444 	if (!uvm_map_lookup_entry(map, start, &oldent)) {
2445 		return (false);
2446 	}
2447 
2448 	/*
2449 	 * check to make sure we have a proper blank entry
2450 	 */
2451 
2452 	if (end < oldent->end) {
2453 		UVM_MAP_CLIP_END(map, oldent, end);
2454 	}
2455 	if (oldent->start != start || oldent->end != end ||
2456 	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2457 		return (false);
2458 	}
2459 
2460 #ifdef DIAGNOSTIC
2461 
2462 	/*
2463 	 * sanity check the newents chain
2464 	 */
2465 
2466 	{
2467 		struct vm_map_entry *tmpent = newents;
2468 		int nent = 0;
2469 		vsize_t sz = 0;
2470 		vaddr_t cur = start;
2471 
2472 		while (tmpent) {
2473 			nent++;
2474 			sz += tmpent->end - tmpent->start;
2475 			if (tmpent->start < cur)
2476 				panic("uvm_map_replace1");
2477 			if (tmpent->start >= tmpent->end || tmpent->end > end) {
2478 				panic("uvm_map_replace2: "
2479 				    "tmpent->start=%#"PRIxVADDR
2480 				    ", tmpent->end=%#"PRIxVADDR
2481 				    ", end=%#"PRIxVADDR,
2482 				    tmpent->start, tmpent->end, end);
2483 			}
2484 			cur = tmpent->end;
2485 			if (tmpent->next) {
2486 				if (tmpent->next->prev != tmpent)
2487 					panic("uvm_map_replace3");
2488 			} else {
2489 				if (newents->prev != tmpent)
2490 					panic("uvm_map_replace4");
2491 			}
2492 			tmpent = tmpent->next;
2493 		}
2494 		if (nent != nnewents)
2495 			panic("uvm_map_replace5");
2496 		if (sz != nsize)
2497 			panic("uvm_map_replace6");
2498 	}
2499 #endif
2500 
2501 	/*
2502 	 * map entry is a valid blank!   replace it.   (this does all the
2503 	 * work of map entry link/unlink...).
2504 	 */
2505 
2506 	if (newents) {
2507 		last = newents->prev;
2508 
2509 		/* critical: flush stale hints out of map */
2510 		SAVE_HINT(map, map->hint, newents);
2511 		if (map->first_free == oldent)
2512 			map->first_free = last;
2513 
2514 		last->next = oldent->next;
2515 		last->next->prev = last;
2516 
2517 		/* Fix RB tree */
2518 		uvm_rb_remove(map, oldent);
2519 
2520 		newents->prev = oldent->prev;
2521 		newents->prev->next = newents;
2522 		map->nentries = map->nentries + (nnewents - 1);
2523 
2524 		/* Fixup the RB tree */
2525 		{
2526 			int i;
2527 			struct vm_map_entry *tmp;
2528 
2529 			tmp = newents;
2530 			for (i = 0; i < nnewents && tmp; i++) {
2531 				uvm_rb_insert(map, tmp);
2532 				tmp = tmp->next;
2533 			}
2534 		}
2535 	} else {
2536 		/* NULL list of new entries: just remove the old one */
2537 		clear_hints(map, oldent);
2538 		uvm_map_entry_unlink(map, oldent);
2539 	}
2540 	map->size -= end - start - nsize;
2541 
2542 	uvm_map_check(map, "map_replace leave");
2543 
2544 	/*
2545 	 * now we can free the old blank entry and return.
2546 	 */
2547 
2548 	*oldentryp = oldent;
2549 	return (true);
2550 }
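
/*
 * Illustrative sketch (not part of this file): the shape that
 * uvm_map_replace() expects of a non-NULL "newents" chain -- linked
 * through ->next, NULL-terminated, with newents->prev pointing at the
 * last entry so the splice can be done in O(1).  This hypothetical
 * checker verifies the count and the head's prev pointer only.
 */
static __unused bool
example_chain_is_wellformed(struct vm_map_entry *newents, int nnewents)
{
	struct vm_map_entry *tmp, *last = NULL;
	int n = 0;

	for (tmp = newents; tmp != NULL; tmp = tmp->next) {
		last = tmp;
		n++;
	}
	return n == nnewents && (newents == NULL || newents->prev == last);
}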
2551 
2552 /*
2553  * uvm_map_extract: extract a mapping from a map and put it somewhere
2554  *	(maybe removing the old mapping)
2555  *
2556  * => maps should be unlocked (we will write lock them)
2557  * => returns 0 on success, error code otherwise
2558  * => start must be page aligned
2559  * => len must be page sized
2560  * => flags:
2561  *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
2562  *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2563  *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2564  *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2565  *      UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go
2566  *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2567  *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2568  *             be used from within the kernel in a kernel level map <<<
2569  */
2570 
2571 int
2572 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2573     struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2574 {
2575 	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2576 	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2577 	    *deadentry, *oldentry;
2578 	struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2579 	vsize_t elen __unused;
2580 	int nchain, error, copy_ok;
2581 	vsize_t nsize;
2582 	UVMHIST_FUNC(__func__);
2583 	UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
2584 	    (uintptr_t)srcmap, start, len, 0);
2585 	UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)",
2586 	    (uintptr_t)dstmap, flags, 0, 0);
2587 
2588 	/*
2589 	 * step 0: sanity check: start must be on a page boundary, length
2590 	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
2591 	 * REMOVE.
2592 	 */
2593 
2594 	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2595 	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2596 		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2597 
2598 	/*
2599 	 * step 1: reserve space in the target map for the extracted area
2600 	 */
2601 
2602 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2603 		dstaddr = vm_map_min(dstmap);
2604 		if (!uvm_map_reserve(dstmap, len, start,
2605 		    atop(start) & uvmexp.colormask, &dstaddr,
2606 		    UVM_FLAG_COLORMATCH))
2607 			return (ENOMEM);
2608 		KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
2609 		*dstaddrp = dstaddr;	/* pass address back to caller */
2610 		UVMHIST_LOG(maphist, "  dstaddr=%#jx", dstaddr,0,0,0);
2611 	} else {
2612 		dstaddr = *dstaddrp;
2613 	}
2614 
2615 	/*
2616 	 * step 2: setup for the extraction process loop by init'ing the
2617 	 * map entry chain, locking src map, and looking up the first useful
2618 	 * entry in the map.
2619 	 */
2620 
2621 	end = start + len;
2622 	newend = dstaddr + len;
2623 	chain = endchain = NULL;
2624 	nchain = 0;
2625 	nsize = 0;
2626 	vm_map_lock(srcmap);
2627 
2628 	if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2629 
2630 		/* "start" is within an entry */
2631 		if (flags & UVM_EXTRACT_QREF) {
2632 
2633 			/*
2634 			 * for quick references we don't clip the entry, so
2635 			 * the entry may map space "before" the starting
2636 			 * virtual address... this is the "fudge" factor
2637 			 * (which can be non-zero only the first time
2638 			 * through the "while" loop in step 3).
2639 			 */
2640 
2641 			fudge = start - entry->start;
2642 		} else {
2643 
2644 			/*
2645 			 * normal reference: we clip the map to fit (thus
2646 			 * fudge is zero)
2647 			 */
2648 
2649 			UVM_MAP_CLIP_START(srcmap, entry, start);
2650 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2651 			fudge = 0;
2652 		}
2653 	} else {
2654 
2655 		/* "start" is not within an entry ... skip to next entry */
2656 		if (flags & UVM_EXTRACT_CONTIG) {
2657 			error = EINVAL;
2658 			goto bad;    /* definite hole here ... */
2659 		}
2660 
2661 		entry = entry->next;
2662 		fudge = 0;
2663 	}
2664 
2665 	/* save values from srcmap for step 6 */
2666 	orig_entry = entry;
2667 	orig_fudge = fudge;
2668 
2669 	/*
2670 	 * step 3: now start looping through the map entries, extracting
2671 	 * as we go.
2672 	 */
2673 
2674 	while (entry->start < end && entry != &srcmap->header) {
2675 
2676 		/* if we are not doing a quick reference, clip it */
2677 		if ((flags & UVM_EXTRACT_QREF) == 0)
2678 			UVM_MAP_CLIP_END(srcmap, entry, end);
2679 
2680 		/* clear needs_copy (allow chunking) */
2681 		if (UVM_ET_ISNEEDSCOPY(entry)) {
2682 			amap_copy(srcmap, entry,
2683 			    AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2684 			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
2685 				error = ENOMEM;
2686 				goto bad;
2687 			}
2688 
2689 			/* amap_copy could clip (during chunk)!  update fudge */
2690 			if (fudge) {
2691 				fudge = start - entry->start;
2692 				orig_fudge = fudge;
2693 			}
2694 		}
2695 
2696 		/* calculate the offset of this from "start" */
2697 		oldoffset = (entry->start + fudge) - start;
2698 
2699 		/* allocate a new map entry */
2700 		newentry = uvm_mapent_alloc(dstmap, 0);
2701 		if (newentry == NULL) {
2702 			error = ENOMEM;
2703 			goto bad;
2704 		}
2705 
2706 		/* set up new map entry */
2707 		newentry->next = NULL;
2708 		newentry->prev = endchain;
2709 		newentry->start = dstaddr + oldoffset;
2710 		newentry->end =
2711 		    newentry->start + (entry->end - (entry->start + fudge));
2712 		if (newentry->end > newend || newentry->end < newentry->start)
2713 			newentry->end = newend;
2714 		newentry->object.uvm_obj = entry->object.uvm_obj;
2715 		if (newentry->object.uvm_obj) {
2716 			if (newentry->object.uvm_obj->pgops->pgo_reference)
2717 				newentry->object.uvm_obj->pgops->
2718 				    pgo_reference(newentry->object.uvm_obj);
2719 			newentry->offset = entry->offset + fudge;
2720 		} else {
2721 			newentry->offset = 0;
2722 		}
2723 		newentry->etype = entry->etype;
2724 		if (flags & UVM_EXTRACT_PROT_ALL) {
2725 			newentry->protection = newentry->max_protection =
2726 			    UVM_PROT_ALL;
2727 		} else {
2728 			newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2729 			    entry->max_protection : entry->protection;
2730 			newentry->max_protection = entry->max_protection;
2731 		}
2732 		newentry->inheritance = entry->inheritance;
2733 		newentry->wired_count = 0;
2734 		newentry->aref.ar_amap = entry->aref.ar_amap;
2735 		if (newentry->aref.ar_amap) {
2736 			newentry->aref.ar_pageoff =
2737 			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2738 			uvm_map_reference_amap(newentry, AMAP_SHARED |
2739 			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2740 		} else {
2741 			newentry->aref.ar_pageoff = 0;
2742 		}
2743 		newentry->advice = entry->advice;
2744 		if ((flags & UVM_EXTRACT_QREF) != 0) {
2745 			newentry->flags |= UVM_MAP_NOMERGE;
2746 		}
2747 
2748 		/* now link it on the chain */
2749 		nchain++;
2750 		nsize += newentry->end - newentry->start;
2751 		if (endchain == NULL) {
2752 			chain = endchain = newentry;
2753 		} else {
2754 			endchain->next = newentry;
2755 			endchain = newentry;
2756 		}
2757 
2758 		/* end of 'while' loop! */
2759 		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2760 		    (entry->next == &srcmap->header ||
2761 		    entry->next->start != entry->end)) {
2762 			error = EINVAL;
2763 			goto bad;
2764 		}
2765 		entry = entry->next;
2766 		fudge = 0;
2767 	}
2768 
2769 	/*
2770 	 * step 4: close off chain (in format expected by uvm_map_replace)
2771 	 */
2772 
2773 	if (chain)
2774 		chain->prev = endchain;
2775 
2776 	/*
2777 	 * step 5: attempt to lock the dest map so we can pmap_copy.
2778 	 * note usage of copy_ok:
2779 	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2780 	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2781 	 */
2782 
2783 	if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2784 		copy_ok = 1;
2785 		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2786 		    nchain, nsize, &resentry)) {
2787 			if (srcmap != dstmap)
2788 				vm_map_unlock(dstmap);
2789 			error = EIO;
2790 			goto bad;
2791 		}
2792 	} else {
2793 		copy_ok = 0;
2794 		/* replace deferred until step 7 */
2795 	}
2796 
2797 	/*
2798 	 * step 6: traverse the srcmap a second time to do the following:
2799 	 *  - if we got a lock on the dstmap do pmap_copy
2800 	 *  - if UVM_EXTRACT_REMOVE remove the entries
2801 	 * we make use of orig_entry and orig_fudge (saved in step 2)
2802 	 */
2803 
2804 	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2805 
2806 		/* purge possible stale hints from srcmap */
2807 		if (flags & UVM_EXTRACT_REMOVE) {
2808 			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2809 			if (srcmap->first_free != &srcmap->header &&
2810 			    srcmap->first_free->start >= start)
2811 				srcmap->first_free = orig_entry->prev;
2812 		}
2813 
2814 		entry = orig_entry;
2815 		fudge = orig_fudge;
2816 		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */
2817 
2818 		while (entry->start < end && entry != &srcmap->header) {
2819 			if (copy_ok) {
2820 				oldoffset = (entry->start + fudge) - start;
2821 				elen = MIN(end, entry->end) -
2822 				    (entry->start + fudge);
2823 				pmap_copy(dstmap->pmap, srcmap->pmap,
2824 				    dstaddr + oldoffset, elen,
2825 				    entry->start + fudge);
2826 			}
2827 
2828 			/* we advance "entry" in the following if statement */
2829 			if (flags & UVM_EXTRACT_REMOVE) {
2830 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
2831 				uvm_map_lock_entry(entry, RW_WRITER);
2832 #else
2833 				uvm_map_lock_entry(entry, RW_READER);
2834 #endif
2835 				pmap_remove(srcmap->pmap, entry->start,
2836 						entry->end);
2837 				uvm_map_unlock_entry(entry);
2838 				oldentry = entry;	/* save entry */
2839 				entry = entry->next;	/* advance */
2840 				uvm_map_entry_unlink(srcmap, oldentry);
2841 							/* add to dead list */
2842 				oldentry->next = deadentry;
2843 				deadentry = oldentry;
2844 			} else {
2845 				entry = entry->next;		/* advance */
2846 			}
2847 
2848 			/* end of 'while' loop */
2849 			fudge = 0;
2850 		}
2851 		pmap_update(srcmap->pmap);
2852 
2853 		/*
2854 		 * unlock dstmap.  we will dispose of deadentry in
2855 		 * step 7 if needed
2856 		 */
2857 
2858 		if (copy_ok && srcmap != dstmap)
2859 			vm_map_unlock(dstmap);
2860 
2861 	} else {
2862 		deadentry = NULL;
2863 	}
2864 
2865 	/*
2866 	 * step 7: we are done with the source map, unlock.   if copy_ok
2867 	 * is 0 then we have not replaced the dummy mapping in dstmap yet
2868 	 * and we need to do so now.
2869 	 */
2870 
2871 	vm_map_unlock(srcmap);
2872 	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2873 		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
2874 
2875 	/* now do the replacement if we didn't do it in step 5 */
2876 	if (copy_ok == 0) {
2877 		vm_map_lock(dstmap);
2878 		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2879 		    nchain, nsize, &resentry);
2880 		vm_map_unlock(dstmap);
2881 
2882 		if (error == false) {
2883 			error = EIO;
2884 			goto bad2;
2885 		}
2886 	}
2887 
2888 	if (resentry != NULL)
2889 		uvm_mapent_free(resentry);
2890 
2891 	return (0);
2892 
2893 	/*
2894 	 * bad: failure recovery
2895 	 */
2896 bad:
2897 	vm_map_unlock(srcmap);
2898 bad2:			/* src already unlocked */
2899 	if (chain)
2900 		uvm_unmap_detach(chain,
2901 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2902 
2903 	if (resentry != NULL)
2904 		uvm_mapent_free(resentry);
2905 
2906 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2907 		uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
2908 	}
2909 	return (error);
2910 }
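
/*
 * Illustrative sketch (not part of this file): moving a page-aligned
 * region from one map into another with uvm_map_extract().  With
 * UVM_EXTRACT_REMOVE the source mappings are deleted as they are
 * extracted; per the rules above, REMOVE may not be combined with
 * CONTIG or QREF.  "example_move_range" is a hypothetical helper.
 */
static __unused int
example_move_range(struct vm_map *srcmap, vaddr_t start, vsize_t len,
    struct vm_map *dstmap, vaddr_t *dstva)
{

	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
	return uvm_map_extract(srcmap, start, len, dstmap, dstva,
	    UVM_EXTRACT_REMOVE);
}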
2911 
2912 /* end of extraction functions */
2913 
2914 /*
2915  * uvm_map_submap: punch down part of a map into a submap
2916  *
2917  * => only the kernel_map is allowed to be submapped
2918  * => the purpose of submapping is to break up the locking granularity
2919  *	of a larger map
2920  * => the range specified must have been mapped previously with a uvm_map()
2921  *	call [with uobj==NULL] to create a blank map entry in the main map.
2922  *	[And it had better still be blank!]
2923  * => maps which contain submaps should never be copied or forked.
2924  * => to remove a submap, use uvm_unmap() on the main map
2925  *	and then uvm_map_deallocate() the submap.
2926  * => main map must be unlocked.
2927  * => submap must have been init'd and have a zero reference count.
2928  *	[need not be locked as we don't actually reference it]
2929  */
2930 
2931 int
2932 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2933     struct vm_map *submap)
2934 {
2935 	struct vm_map_entry *entry;
2936 	int error;
2937 
2938 	vm_map_lock(map);
2939 	VM_MAP_RANGE_CHECK(map, start, end);
2940 
2941 	if (uvm_map_lookup_entry(map, start, &entry)) {
2942 		UVM_MAP_CLIP_START(map, entry, start);
2943 		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
2944 	} else {
2945 		entry = NULL;
2946 	}
2947 
2948 	if (entry != NULL &&
2949 	    entry->start == start && entry->end == end &&
2950 	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2951 	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2952 		entry->etype |= UVM_ET_SUBMAP;
2953 		entry->object.sub_map = submap;
2954 		entry->offset = 0;
2955 		uvm_map_reference(submap);
2956 		error = 0;
2957 	} else {
2958 		error = EINVAL;
2959 	}
2960 	vm_map_unlock(map);
2961 
2962 	return error;
2963 }
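
/*
 * Illustrative sketch (not part of this file): the two-step submap
 * protocol described above -- first create a blank entry in the main
 * map with uvm_map() (uobj == NULL), then punch the submap in with
 * uvm_map_submap().  "example_install_submap" is a hypothetical
 * helper; error unwinding of the blank entry is omitted.
 */
static __unused int
example_install_submap(struct vm_map *map, vaddr_t *va, vsize_t size,
    struct vm_map *submap)
{
	int error;

	error = uvm_map(map, va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
	if (error)
		return error;
	return uvm_map_submap(map, *va, *va + size, submap);
}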
2964 
2965 /*
2966  * uvm_map_protect_user: change map protection on behalf of the user.
2967  * Enforces PAX settings as necessary.
2968  */
2969 int
2970 uvm_map_protect_user(struct lwp *l, vaddr_t start, vaddr_t end,
2971     vm_prot_t new_prot)
2972 {
2973 	int error;
2974 
2975 	if ((error = PAX_MPROTECT_VALIDATE(l, new_prot)))
2976 		return error;
2977 
2978 	return uvm_map_protect(&l->l_proc->p_vmspace->vm_map, start, end,
2979 	    new_prot, false);
2980 }
2981 
2982 
2983 /*
2984  * uvm_map_protect: change map protection
2985  *
2986  * => set_max means set max_protection.
2987  * => map must be unlocked.
2988  */
2989 
2990 #define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
2991 			 ~VM_PROT_WRITE : VM_PROT_ALL)
2992 
2993 int
2994 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2995     vm_prot_t new_prot, bool set_max)
2996 {
2997 	struct vm_map_entry *current, *entry;
2998 	int error = 0;
2999 	UVMHIST_FUNC(__func__);
3000 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
3001 	    (uintptr_t)map, start, end, new_prot);
3002 
3003 	vm_map_lock(map);
3004 	VM_MAP_RANGE_CHECK(map, start, end);
3005 	if (uvm_map_lookup_entry(map, start, &entry)) {
3006 		UVM_MAP_CLIP_START(map, entry, start);
3007 	} else {
3008 		entry = entry->next;
3009 	}
3010 
3011 	/*
3012 	 * make a first pass to check for protection violations.
3013 	 */
3014 
3015 	current = entry;
3016 	while ((current != &map->header) && (current->start < end)) {
3017 		if (UVM_ET_ISSUBMAP(current)) {
3018 			error = EINVAL;
3019 			goto out;
3020 		}
3021 		if ((new_prot & current->max_protection) != new_prot) {
3022 			error = EACCES;
3023 			goto out;
3024 		}
3025 		/*
3026 		 * Don't allow VM_PROT_EXECUTE to be set on entries that
3027 		 * point to vnodes that are associated with a NOEXEC file
3028 		 * system.
3029 		 */
3030 		if (UVM_ET_ISOBJ(current) &&
3031 		    UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3032 			struct vnode *vp =
3033 			    (struct vnode *) current->object.uvm_obj;
3034 
3035 			if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3036 			    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3037 				error = EACCES;
3038 				goto out;
3039 			}
3040 		}
3041 
3042 		current = current->next;
3043 	}
3044 
3045 	/* go back and fix up protections (no need to clip this time). */
3046 
3047 	current = entry;
3048 	while ((current != &map->header) && (current->start < end)) {
3049 		vm_prot_t old_prot;
3050 
3051 		UVM_MAP_CLIP_END(map, current, end);
3052 		old_prot = current->protection;
3053 		if (set_max)
3054 			current->protection =
3055 			    (current->max_protection = new_prot) & old_prot;
3056 		else
3057 			current->protection = new_prot;
3058 
3059 		/*
3060 		 * update physical map if necessary.  worry about copy-on-write
3061 		 * here -- CHECK THIS XXX
3062 		 */
3063 
3064 		if (current->protection != old_prot) {
3065 			/* update pmap! */
3066 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
3067 			uvm_map_lock_entry(current, RW_WRITER);
3068 #else
3069 			uvm_map_lock_entry(current, RW_READER);
3070 #endif
3071 			pmap_protect(map->pmap, current->start, current->end,
3072 			    current->protection & MASK(current));
3073 			uvm_map_unlock_entry(current);
3074 
3075 			/*
3076 			 * If this entry points at a vnode, and the
3077 			 * protection includes VM_PROT_EXECUTE, mark
3078 			 * the vnode as VEXECMAP.
3079 			 */
3080 			if (UVM_ET_ISOBJ(current)) {
3081 				struct uvm_object *uobj =
3082 				    current->object.uvm_obj;
3083 
3084 				if (UVM_OBJ_IS_VNODE(uobj) &&
3085 				    (current->protection & VM_PROT_EXECUTE)) {
3086 					vn_markexec((struct vnode *) uobj);
3087 				}
3088 			}
3089 		}
3090 
3091 		/*
3092 		 * If the map is configured to lock any future mappings,
3093 		 * wire this entry now if the old protection was VM_PROT_NONE
3094 		 * and the new protection is not VM_PROT_NONE.
3095 		 */
3096 
3097 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3098 		    VM_MAPENT_ISWIRED(current) == 0 &&
3099 		    old_prot == VM_PROT_NONE &&
3100 		    new_prot != VM_PROT_NONE) {
3101 
3102 			/*
3103 			 * We must call pmap_update() here because the
3104 			 * pmap_protect() call above might have removed some
3105 			 * pmap entries and uvm_map_pageable() might create
3106 			 * some new pmap entries that rely on the prior
3107 			 * removals being completely finished.
3108 			 */
3109 
3110 			pmap_update(map->pmap);
3111 
3112 			if (uvm_map_pageable(map, current->start,
3113 			    current->end, false,
3114 			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3115 
3116 				/*
3117 				 * If locking the entry fails, remember the
3118 				 * error if it's the first one.  Note we
3119 				 * still continue setting the protection in
3120 				 * the map, but will return the error
3121 				 * condition regardless.
3122 				 *
3123 				 * XXX Ignore what the actual error is,
3124 				 * XXX just call it a resource shortage
3125 				 * XXX so that it doesn't get confused
3126 				 * XXX what uvm_map_protect() itself would
3127 				 * XXX normally return.
3128 				 */
3129 
3130 				error = ENOMEM;
3131 			}
3132 		}
3133 		current = current->next;
3134 	}
3135 	pmap_update(map->pmap);
3136 
3137  out:
3138 	vm_map_unlock(map);
3139 
3140 	UVMHIST_LOG(maphist, "<- done, error=%jd",error,0,0,0);
3141 	return error;
3142 }
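
/*
 * Illustrative sketch (not part of this file): the set_max arithmetic
 * above in isolation.  With set_max true, new_prot becomes the new
 * max_protection and the effective protection is the old protection
 * clipped against it, i.e. prot' = new_prot & old_prot.
 * "example_clip_prot" is a hypothetical helper.
 */
static __unused vm_prot_t
example_clip_prot(vm_prot_t old_prot, vm_prot_t new_prot, bool set_max)
{

	return set_max ? (new_prot & old_prot) : new_prot;
}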
3143 
3144 #undef  MASK
3145 
3146 /*
3147  * uvm_map_inherit: set inheritance code for range of addrs in map.
3148  *
3149  * => map must be unlocked
3150  * => note that the inherit code is used during a "fork".  see fork
3151  *	code for details.
3152  */
3153 
3154 int
3155 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3156     vm_inherit_t new_inheritance)
3157 {
3158 	struct vm_map_entry *entry, *temp_entry;
3159 	UVMHIST_FUNC(__func__);
3160 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
3161 	    (uintptr_t)map, start, end, new_inheritance);
3162 
3163 	switch (new_inheritance) {
3164 	case MAP_INHERIT_NONE:
3165 	case MAP_INHERIT_COPY:
3166 	case MAP_INHERIT_SHARE:
3167 	case MAP_INHERIT_ZERO:
3168 		break;
3169 	default:
3170 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3171 		return EINVAL;
3172 	}
3173 
3174 	vm_map_lock(map);
3175 	VM_MAP_RANGE_CHECK(map, start, end);
3176 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3177 		entry = temp_entry;
3178 		UVM_MAP_CLIP_START(map, entry, start);
3179 	} else {
3180 		entry = temp_entry->next;
3181 	}
3182 	while ((entry != &map->header) && (entry->start < end)) {
3183 		UVM_MAP_CLIP_END(map, entry, end);
3184 		entry->inheritance = new_inheritance;
3185 		entry = entry->next;
3186 	}
3187 	vm_map_unlock(map);
3188 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3189 	return 0;
3190 }
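
/*
 * Illustrative sketch (not part of this file): marking a range so that
 * a child created by fork shares it with the parent, minherit(2)-style.
 * "example_share_on_fork" is a hypothetical helper.
 */
static __unused int
example_share_on_fork(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	return uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
}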
3191 
3192 /*
3193  * uvm_map_advice: set advice code for range of addrs in map.
3194  *
3195  * => map must be unlocked
3196  */
3197 
3198 int
3199 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3200 {
3201 	struct vm_map_entry *entry, *temp_entry;
3202 	UVMHIST_FUNC(__func__);
3203 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
3204 	    (uintptr_t)map, start, end, new_advice);
3205 
3206 	vm_map_lock(map);
3207 	VM_MAP_RANGE_CHECK(map, start, end);
3208 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3209 		entry = temp_entry;
3210 		UVM_MAP_CLIP_START(map, entry, start);
3211 	} else {
3212 		entry = temp_entry->next;
3213 	}
3214 
3215 	/*
3216 	 * XXXJRT: disallow holes?
3217 	 */
3218 
3219 	while ((entry != &map->header) && (entry->start < end)) {
3220 		UVM_MAP_CLIP_END(map, entry, end);
3221 
3222 		switch (new_advice) {
3223 		case MADV_NORMAL:
3224 		case MADV_RANDOM:
3225 		case MADV_SEQUENTIAL:
3226 			/* nothing special here */
3227 			break;
3228 
3229 		default:
3230 			vm_map_unlock(map);
3231 			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3232 			return EINVAL;
3233 		}
3234 		entry->advice = new_advice;
3235 		entry = entry->next;
3236 	}
3237 
3238 	vm_map_unlock(map);
3239 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3240 	return 0;
3241 }
3242 
3243 /*
3244  * uvm_map_willneed: apply MADV_WILLNEED
3245  */
3246 
3247 int
3248 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3249 {
3250 	struct vm_map_entry *entry;
3251 	UVMHIST_FUNC(__func__);
3252 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
3253 	    (uintptr_t)map, start, end, 0);
3254 
3255 	vm_map_lock_read(map);
3256 	VM_MAP_RANGE_CHECK(map, start, end);
3257 	if (!uvm_map_lookup_entry(map, start, &entry)) {
3258 		entry = entry->next;
3259 	}
3260 	while (entry->start < end) {
3261 		struct vm_amap * const amap = entry->aref.ar_amap;
3262 		struct uvm_object * const uobj = entry->object.uvm_obj;
3263 
3264 		KASSERT(entry != &map->header);
3265 		KASSERT(start < entry->end);
3266 		/*
3267 		 * For now, we handle only the easy but commonly-requested case.
3268 		 * i.e. start prefetching of backing uobj pages.
3269 		 *
3270 		 * XXX It might be useful to pmap_enter() the already-in-core
3271 		 * pages by inventing a "weak" mode for uvm_fault() which would
3272 		 * only do the PGO_LOCKED pgo_get().
3273 		 */
3274 		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3275 			off_t offset;
3276 			off_t size;
3277 
3278 			offset = entry->offset;
3279 			if (start < entry->start) {
3280 				offset += entry->start - start;
3281 			}
3282 			size = entry->offset + (entry->end - entry->start);
3283 			if (entry->end < end) {
3284 				size -= end - entry->end;
3285 			}
3286 			uvm_readahead(uobj, offset, size);
3287 		}
3288 		entry = entry->next;
3289 	}
3290 	vm_map_unlock_read(map);
3291 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3292 	return 0;
3293 }
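
/*
 * Illustrative sketch (not part of this file): clamping the request
 * window [start, end) to one entry and converting it to a (uobj
 * offset, size) pair for readahead, assuming entry->offset is the
 * uobj offset corresponding to entry->start.  This is a hypothetical
 * restatement of the computation above, not a replacement for it.
 */
static __unused void
example_readahead_window(const struct vm_map_entry *entry, vaddr_t start,
    vaddr_t end, off_t *offp, off_t *sizep)
{
	vaddr_t lo = MAX(start, entry->start);
	vaddr_t hi = MIN(end, entry->end);

	*offp = entry->offset + (lo - entry->start);
	*sizep = hi - lo;
}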
3294 
3295 /*
3296  * uvm_map_pageable: sets the pageability of a range in a map.
3297  *
3298  * => wires map entries.  should not be used for transient page locking.
3299  *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3300  * => regions specified as not pageable require lock-down (wired) memory
3301  *	and page tables.
3302  * => map must never be read-locked
3303  * => if islocked is true, map is already write-locked
3304  * => we always unlock the map, since we must downgrade to a read-lock
3305  *	to call uvm_fault_wire()
3306  * => XXXCDC: check this and try and clean it up.
3307  */
3308 
3309 int
3310 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3311     bool new_pageable, int lockflags)
3312 {
3313 	struct vm_map_entry *entry, *start_entry, *failed_entry;
3314 	int rv;
3315 #ifdef DIAGNOSTIC
3316 	u_int timestamp_save;
3317 #endif
3318 	UVMHIST_FUNC(__func__);
3319 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
3320 	    (uintptr_t)map, start, end, new_pageable);
3321 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3322 
3323 	if ((lockflags & UVM_LK_ENTER) == 0)
3324 		vm_map_lock(map);
3325 	VM_MAP_RANGE_CHECK(map, start, end);
3326 
3327 	/*
3328 	 * only one pageability change may take place at one time, since
3329 	 * uvm_fault_wire assumes it will be called only once for each
3330 	 * wiring/unwiring.  therefore, we have to make sure we're actually
3331 	 * changing the pageability for the entire region.  we do so before
3332 	 * making any changes.
3333 	 */
3334 
3335 	if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3336 		if ((lockflags & UVM_LK_EXIT) == 0)
3337 			vm_map_unlock(map);
3338 
3339 		UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3340 		return EFAULT;
3341 	}
3342 	entry = start_entry;
3343 
3344 	if (start == end) {		/* nothing required */
3345 		if ((lockflags & UVM_LK_EXIT) == 0)
3346 			vm_map_unlock(map);
3347 
3348 		UVMHIST_LOG(maphist,"<- done (nothing)",0,0,0,0);
3349 		return 0;
3350 	}
3351 
3352 	/*
3353 	 * handle wiring and unwiring separately.
3354 	 */
3355 
3356 	if (new_pageable) {		/* unwire */
3357 		UVM_MAP_CLIP_START(map, entry, start);
3358 
3359 		/*
3360 		 * unwiring.  first ensure that the range to be unwired is
3361 		 * really wired down and that there are no holes.
3362 		 */
3363 
3364 		while ((entry != &map->header) && (entry->start < end)) {
3365 			if (entry->wired_count == 0 ||
3366 			    (entry->end < end &&
3367 			     (entry->next == &map->header ||
3368 			      entry->next->start > entry->end))) {
3369 				if ((lockflags & UVM_LK_EXIT) == 0)
3370 					vm_map_unlock(map);
3371 				UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3372 				return EINVAL;
3373 			}
3374 			entry = entry->next;
3375 		}
3376 
3377 		/*
3378 		 * POSIX 1003.1b - a single munlock call unlocks a region,
3379 		 * regardless of the number of mlock calls made on that
3380 		 * region.
3381 		 */
3382 
3383 		entry = start_entry;
3384 		while ((entry != &map->header) && (entry->start < end)) {
3385 			UVM_MAP_CLIP_END(map, entry, end);
3386 			if (VM_MAPENT_ISWIRED(entry))
3387 				uvm_map_entry_unwire(map, entry);
3388 			entry = entry->next;
3389 		}
3390 		if ((lockflags & UVM_LK_EXIT) == 0)
3391 			vm_map_unlock(map);
3392 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3393 		return 0;
3394 	}
3395 
3396 	/*
3397 	 * wire case: in two passes [XXXCDC: ugly block of code here]
3398 	 *
3399 	 * 1: holding the write lock, we create any anonymous maps that need
3400 	 *    to be created.  then we clip each map entry to the region to
3401 	 *    be wired and increment its wiring count.
3402 	 *
3403 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3404 	 *    in the pages for any newly wired area (wired_count == 1).
3405 	 *
3406 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3407 	 *    deadlock with another thread that may have faulted on one of
3408 	 *    the pages to be wired (it would mark the page busy, blocking
3409 	 *    us, then in turn block on the map lock that we hold).  because
3410 	 *    of problems in the recursive lock package, we cannot upgrade
3411 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3412 	 *    require the write lock must be done beforehand.  because we
3413 	 *    keep the read lock on the map, the copy-on-write status of the
3414 	 *    entries we modify here cannot change.
3415 	 */
3416 
3417 	while ((entry != &map->header) && (entry->start < end)) {
3418 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3419 
3420 			/*
3421 			 * perform actions of vm_map_lookup that need the
3422 			 * write lock on the map: create an anonymous map
3423 			 * for a copy-on-write region, or an anonymous map
3424 			 * for a zero-fill region.  (XXXCDC: submap case
3425 			 * ok?)
3426 			 */
3427 
3428 			if (!UVM_ET_ISSUBMAP(entry)) {  /* not submap */
3429 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3430 				    ((entry->max_protection & VM_PROT_WRITE) ||
3431 				     (entry->object.uvm_obj == NULL))) {
3432 					amap_copy(map, entry, 0, start, end);
3433 					/* XXXCDC: wait OK? */
3434 				}
3435 			}
3436 		}
3437 		UVM_MAP_CLIP_START(map, entry, start);
3438 		UVM_MAP_CLIP_END(map, entry, end);
3439 		entry->wired_count++;
3440 
3441 		/*
3442 		 * Check for holes
3443 		 */
3444 
3445 		if (entry->protection == VM_PROT_NONE ||
3446 		    (entry->end < end &&
3447 		     (entry->next == &map->header ||
3448 		      entry->next->start > entry->end))) {
3449 
3450 			/*
3451 			 * found one.  amap creation actions do not need to
3452 			 * be undone, but the wired counts need to be restored.
3453 			 */
3454 
3455 			while (entry != &map->header && entry->end > start) {
3456 				entry->wired_count--;
3457 				entry = entry->prev;
3458 			}
3459 			if ((lockflags & UVM_LK_EXIT) == 0)
3460 				vm_map_unlock(map);
3461 			UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3462 			return EINVAL;
3463 		}
3464 		entry = entry->next;
3465 	}
3466 
3467 	/*
3468 	 * Pass 2.
3469 	 */
3470 
3471 #ifdef DIAGNOSTIC
3472 	timestamp_save = map->timestamp;
3473 #endif
3474 	vm_map_busy(map);
3475 	vm_map_unlock(map);
3476 
3477 	rv = 0;
3478 	entry = start_entry;
3479 	while (entry != &map->header && entry->start < end) {
3480 		if (entry->wired_count == 1) {
3481 			rv = uvm_fault_wire(map, entry->start, entry->end,
3482 			    entry->max_protection, 1);
3483 			if (rv) {
3484 
3485 				/*
3486 				 * wiring failed.  break out of the loop.
3487 				 * we'll clean up the map below, once we
3488 				 * have a write lock again.
3489 				 */
3490 
3491 				break;
3492 			}
3493 		}
3494 		entry = entry->next;
3495 	}
3496 
3497 	if (rv) {	/* failed? */
3498 
3499 		/*
3500 		 * Get back to an exclusive (write) lock.
3501 		 */
3502 
3503 		vm_map_lock(map);
3504 		vm_map_unbusy(map);
3505 
3506 #ifdef DIAGNOSTIC
3507 		if (timestamp_save + 1 != map->timestamp)
3508 			panic("uvm_map_pageable: stale map");
3509 #endif
3510 
3511 		/*
3512 		 * first drop the wiring count on all the entries
3513 		 * which haven't actually been wired yet.
3514 		 */
3515 
3516 		failed_entry = entry;
3517 		while (entry != &map->header && entry->start < end) {
3518 			entry->wired_count--;
3519 			entry = entry->next;
3520 		}
3521 
3522 		/*
3523 		 * now, unwire all the entries that were successfully
3524 		 * wired above.
3525 		 */
3526 
3527 		entry = start_entry;
3528 		while (entry != failed_entry) {
3529 			entry->wired_count--;
3530 			if (VM_MAPENT_ISWIRED(entry) == 0)
3531 				uvm_map_entry_unwire(map, entry);
3532 			entry = entry->next;
3533 		}
3534 		if ((lockflags & UVM_LK_EXIT) == 0)
3535 			vm_map_unlock(map);
3536 		UVMHIST_LOG(maphist, "<- done (RV=%jd)", rv,0,0,0);
3537 		return (rv);
3538 	}
3539 
3540 	if ((lockflags & UVM_LK_EXIT) == 0) {
3541 		vm_map_unbusy(map);
3542 	} else {
3543 
3544 		/*
3545 		 * Get back to an exclusive (write) lock.
3546 		 */
3547 
3548 		vm_map_lock(map);
3549 		vm_map_unbusy(map);
3550 	}
3551 
3552 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3553 	return 0;
3554 }
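
/*
 * Illustrative sketch (not part of this file): wiring and unwiring a
 * range, mlock(2)-style, with uvm_map_pageable().  The map must be
 * unlocked on entry when no UVM_LK_* flags are passed.
 * "example_wire_range" is a hypothetical helper.
 */
static __unused int
example_wire_range(struct vm_map *map, vaddr_t start, vaddr_t end,
    bool wire)
{

	/* new_pageable == false means "wire", true means "unwire" */
	return uvm_map_pageable(map, start, end, !wire, 0);
}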
3555 
3556 /*
3557  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3558  * all mapped regions.
3559  *
3560  * => map must not be locked.
3561  * => if no flags are specified, all regions are unwired.
3562  * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3563  */
3564 
3565 int
3566 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3567 {
3568 	struct vm_map_entry *entry, *failed_entry;
3569 	vsize_t size;
3570 	int rv;
3571 #ifdef DIAGNOSTIC
3572 	u_int timestamp_save;
3573 #endif
3574 	UVMHIST_FUNC(__func__);
3575 	UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
3576 	    0, 0);
3577 
3578 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3579 
3580 	vm_map_lock(map);
3581 
3582 	/*
3583 	 * handle wiring and unwiring separately.
3584 	 */
3585 
3586 	if (flags == 0) {			/* unwire */
3587 
3588 		/*
3589 		 * POSIX 1003.1b -- munlockall unlocks all regions,
3590 		 * regardless of how many times mlockall has been called.
3591 		 */
3592 
3593 		for (entry = map->header.next; entry != &map->header;
3594 		     entry = entry->next) {
3595 			if (VM_MAPENT_ISWIRED(entry))
3596 				uvm_map_entry_unwire(map, entry);
3597 		}
3598 		map->flags &= ~VM_MAP_WIREFUTURE;
3599 		vm_map_unlock(map);
3600 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3601 		return 0;
3602 	}
3603 
3604 	if (flags & MCL_FUTURE) {
3605 
3606 		/*
3607 		 * must wire all future mappings; remember this.
3608 		 */
3609 
3610 		map->flags |= VM_MAP_WIREFUTURE;
3611 	}
3612 
3613 	if ((flags & MCL_CURRENT) == 0) {
3614 
3615 		/*
3616 		 * no more work to do!
3617 		 */
3618 
3619 		UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3620 		vm_map_unlock(map);
3621 		return 0;
3622 	}
3623 
3624 	/*
3625 	 * wire case: in three passes [XXXCDC: ugly block of code here]
3626 	 *
3627 	 * 1: holding the write lock, count all pages mapped by non-wired
3628 	 *    entries.  if this would cause us to go over our limit, we fail.
3629 	 *
3630 	 * 2: still holding the write lock, we create any anonymous maps that
3631 	 *    need to be created.  then we increment each entry's wiring count.
3632 	 *
3633 	 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3634 	 *    in the pages for any newly wired area (wired_count == 1).
3635 	 *
3636 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3637 	 *    deadlock with another thread that may have faulted on one of
3638 	 *    the pages to be wired (it would mark the page busy, blocking
3639 	 *    us, then in turn block on the map lock that we hold).  because
3640 	 *    of problems in the recursive lock package, we cannot upgrade
3641 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3642 	 *    require the write lock must be done beforehand.  because we
3643 	 *    keep the read lock on the map, the copy-on-write status of the
3644 	 *    entries we modify here cannot change.
3645 	 */
3646 
3647 	for (size = 0, entry = map->header.next; entry != &map->header;
3648 	     entry = entry->next) {
3649 		if (entry->protection != VM_PROT_NONE &&
3650 		    VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3651 			size += entry->end - entry->start;
3652 		}
3653 	}
3654 
3655 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3656 		vm_map_unlock(map);
3657 		return ENOMEM;
3658 	}
3659 
3660 	if (limit != 0 &&
3661 	    (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3662 		vm_map_unlock(map);
3663 		return ENOMEM;
3664 	}
3665 
3666 	/*
3667 	 * Pass 2.
3668 	 */
3669 
3670 	for (entry = map->header.next; entry != &map->header;
3671 	     entry = entry->next) {
3672 		if (entry->protection == VM_PROT_NONE)
3673 			continue;
3674 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3675 
3676 			/*
3677 			 * perform actions of vm_map_lookup that need the
3678 			 * write lock on the map: create an anonymous map
3679 			 * for a copy-on-write region, or an anonymous map
3680 			 * for a zero-fill region.  (XXXCDC: submap case
3681 			 * ok?)
3682 			 */
3683 
3684 			if (!UVM_ET_ISSUBMAP(entry)) {	/* not submap */
3685 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3686 				    ((entry->max_protection & VM_PROT_WRITE) ||
3687 				     (entry->object.uvm_obj == NULL))) {
3688 					amap_copy(map, entry, 0, entry->start,
3689 					    entry->end);
3690 					/* XXXCDC: wait OK? */
3691 				}
3692 			}
3693 		}
3694 		entry->wired_count++;
3695 	}
3696 
3697 	/*
3698 	 * Pass 3.
3699 	 */
3700 
3701 #ifdef DIAGNOSTIC
3702 	timestamp_save = map->timestamp;
3703 #endif
3704 	vm_map_busy(map);
3705 	vm_map_unlock(map);
3706 
3707 	rv = 0;
3708 	for (entry = map->header.next; entry != &map->header;
3709 	     entry = entry->next) {
3710 		if (entry->wired_count == 1) {
3711 			rv = uvm_fault_wire(map, entry->start, entry->end,
3712 			    entry->max_protection, 1);
3713 			if (rv) {
3714 
3715 				/*
3716 				 * wiring failed.  break out of the loop.
3717 				 * we'll clean up the map below, once we
3718 				 * have a write lock again.
3719 				 */
3720 
3721 				break;
3722 			}
3723 		}
3724 	}
3725 
3726 	if (rv) {
3727 
3728 		/*
3729 		 * Get back an exclusive (write) lock.
3730 		 */
3731 
3732 		vm_map_lock(map);
3733 		vm_map_unbusy(map);
3734 
3735 #ifdef DIAGNOSTIC
3736 		if (timestamp_save + 1 != map->timestamp)
3737 			panic("uvm_map_pageable_all: stale map");
3738 #endif
3739 
3740 		/*
3741 		 * first drop the wiring count on all the entries
3742 		 * which haven't actually been wired yet.
3743 		 *
3744 		 * Skip VM_PROT_NONE entries like we did above.
3745 		 */
3746 
3747 		failed_entry = entry;
3748 		for (/* nothing */; entry != &map->header;
3749 		     entry = entry->next) {
3750 			if (entry->protection == VM_PROT_NONE)
3751 				continue;
3752 			entry->wired_count--;
3753 		}
3754 
3755 		/*
3756 		 * now, unwire all the entries that were successfully
3757 		 * wired above.
3758 		 *
3759 		 * Skip VM_PROT_NONE entries like we did above.
3760 		 */
3761 
3762 		for (entry = map->header.next; entry != failed_entry;
3763 		     entry = entry->next) {
3764 			if (entry->protection == VM_PROT_NONE)
3765 				continue;
3766 			entry->wired_count--;
3767 			if (VM_MAPENT_ISWIRED(entry))
3768 				uvm_map_entry_unwire(map, entry);
3769 		}
3770 		vm_map_unlock(map);
3771 		UVMHIST_LOG(maphist,"<- done (RV=%jd)", rv,0,0,0);
3772 		return (rv);
3773 	}
3774 
3775 	vm_map_unbusy(map);
3776 
3777 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3778 	return 0;
3779 }
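
/*
 * Illustrative caller (a hedged sketch, not code that lives in this
 * file): an mlockall(2)-style path drives uvm_map_pageable_all() along
 * the lines below; the authoritative version, with its exact error and
 * resource-limit handling, is sys_mlockall() in uvm_mmap.c.
 *
 *	struct proc *p = l->l_proc;
 *
 *	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
 *		return EINVAL;
 *	return uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * passing flags == 0 instead selects the munlockall unwire path at the
 * top of the function.
 */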
3780 
3781 /*
3782  * uvm_map_clean: clean out a map range
3783  *
3784  * => valid flags:
3785  *   if (flags & PGO_CLEANIT): dirty pages are cleaned first
3786  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
3787  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3788  *   if (flags & PGO_FREE): any cached pages are freed after clean
3789  * => returns an error if any part of the specified range isn't mapped
3790  * => there is never a need to flush the amap layer, since anonymous
3791  *	memory has no permanent home; we may deactivate its pages, though
3792  * => called from sys_msync() and sys_madvise()
3793  * => caller must not write-lock map (read OK).
3794  * => we may sleep while cleaning if SYNCIO [with map read-locked]
3795  */
3796 
3797 int
3798 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3799 {
3800 	struct vm_map_entry *current, *entry;
3801 	struct uvm_object *uobj;
3802 	struct vm_amap *amap;
3803 	struct vm_anon *anon;
3804 	struct vm_page *pg;
3805 	vaddr_t offset;
3806 	vsize_t size;
3807 	voff_t uoff;
3808 	int error, refs;
3809 	UVMHIST_FUNC(__func__);
3810 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
3811 	    (uintptr_t)map, start, end, flags);
3812 
3813 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3814 		(PGO_FREE|PGO_DEACTIVATE));
3815 
3816 	vm_map_lock_read(map);
3817 	VM_MAP_RANGE_CHECK(map, start, end);
3818 	if (uvm_map_lookup_entry(map, start, &entry) == false) {
3819 		vm_map_unlock_read(map);
3820 		return EFAULT;
3821 	}
3822 
3823 	/*
3824 	 * Make a first pass to check for holes and wiring problems.
3825 	 */
3826 
3827 	for (current = entry; current->start < end; current = current->next) {
3828 		if (UVM_ET_ISSUBMAP(current)) {
3829 			vm_map_unlock_read(map);
3830 			return EINVAL;
3831 		}
3832 		if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
3833 			vm_map_unlock_read(map);
3834 			return EBUSY;
3835 		}
3836 		if (end <= current->end) {
3837 			break;
3838 		}
3839 		if (current->end != current->next->start) {
3840 			vm_map_unlock_read(map);
3841 			return EFAULT;
3842 		}
3843 	}
3844 
3845 	error = 0;
3846 	for (current = entry; start < end; current = current->next) {
3847 		amap = current->aref.ar_amap;	/* upper layer */
3848 		uobj = current->object.uvm_obj;	/* lower layer */
3849 		KASSERT(start >= current->start);
3850 
3851 		/*
3852 		 * No amap cleaning necessary if:
3853 		 *
3854 		 *	(1) There's no amap.
3855 		 *
3856 		 *	(2) We're not deactivating or freeing pages.
3857 		 */
3858 
3859 		if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3860 			goto flush_object;
3861 
3862 		offset = start - current->start;
3863 		size = MIN(end, current->end) - start;
3864 
3865 		amap_lock(amap, RW_WRITER);
3866 		for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3867 			anon = amap_lookup(&current->aref, offset);
3868 			if (anon == NULL)
3869 				continue;
3870 
3871 			KASSERT(anon->an_lock == amap->am_lock);
3872 			pg = anon->an_page;
3873 			if (pg == NULL) {
3874 				continue;
3875 			}
3876 			if (pg->flags & PG_BUSY) {
3877 				continue;
3878 			}
3879 
3880 			switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3881 
3882 			/*
3883 			 * In these first 3 cases, we just deactivate the page.
3884 			 */
3885 
3886 			case PGO_CLEANIT|PGO_FREE:
3887 			case PGO_CLEANIT|PGO_DEACTIVATE:
3888 			case PGO_DEACTIVATE:
3889  deactivate_it:
3890 				/*
3891 				 * skip the page if it's loaned or wired,
3892 				 * since it shouldn't be on a paging queue
3893 				 * at all in these cases.
3894 				 */
3895 
3896 				if (pg->loan_count != 0 ||
3897 				    pg->wire_count != 0) {
3898 					continue;
3899 				}
3900 				KASSERT(pg->uanon == anon);
3901 				uvm_pagelock(pg);
3902 				uvm_pagedeactivate(pg);
3903 				uvm_pageunlock(pg);
3904 				continue;
3905 
3906 			case PGO_FREE:
3907 
3908 				/*
3909 				 * If there are multiple references to
3910 				 * the amap, just deactivate the page.
3911 				 */
3912 
3913 				if (amap_refs(amap) > 1)
3914 					goto deactivate_it;
3915 
3916 				/* skip the page if it's wired */
3917 				if (pg->wire_count != 0) {
3918 					continue;
3919 				}
3920 				amap_unadd(&current->aref, offset);
3921 				refs = --anon->an_ref;
3922 				if (refs == 0) {
3923 					uvm_anfree(anon);
3924 				}
3925 				continue;
3926 			}
3927 		}
3928 		amap_unlock(amap);
3929 
3930  flush_object:
3931 		/*
3932 		 * flush pages if we've got a valid backing object.
3933 		 * note that we must always clean object pages before
3934 		 * freeing them since otherwise we could reveal stale
3935 		 * data from files.
3936 		 */
3937 
3938 		uoff = current->offset + (start - current->start);
3939 		size = MIN(end, current->end) - start;
3940 		if (uobj != NULL) {
3941 			rw_enter(uobj->vmobjlock, RW_WRITER);
3942 			if (uobj->pgops->pgo_put != NULL)
3943 				error = (uobj->pgops->pgo_put)(uobj, uoff,
3944 				    uoff + size, flags | PGO_CLEANIT);
3945 			else
3946 				error = 0;
3947 		}
3948 		start += size;
3949 	}
3950 	vm_map_unlock_read(map);
3951 	return (error);
3952 }
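
/*
 * Illustrative flag mapping (a sketch; the authoritative translation
 * is done by the msync(2) implementation in uvm_mmap.c): an msync-style
 * caller builds the PGO_* argument from the MS_* flags roughly so:
 *
 *	int uvmflags = PGO_CLEANIT;
 *
 *	if (ms_flags & MS_INVALIDATE)
 *		uvmflags |= PGO_FREE;
 *	if (ms_flags & MS_SYNC)
 *		uvmflags |= PGO_SYNCIO;
 *	error = uvm_map_clean(map, start, end, uvmflags);
 *
 * note that PGO_FREE and PGO_DEACTIVATE must not be combined, as the
 * KASSERT at the top of uvm_map_clean() enforces.
 */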
3953 
3954 
3955 /*
3956  * uvm_map_checkprot: check protection in map
3957  *
3958  * => the region must be fully allocated and allow the specified protection.
3959  * => map must be read or write locked by caller.
3960  */
3961 
3962 bool
3963 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3964     vm_prot_t protection)
3965 {
3966 	struct vm_map_entry *entry;
3967 	struct vm_map_entry *tmp_entry;
3968 
3969 	if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3970 		return (false);
3971 	}
3972 	entry = tmp_entry;
3973 	while (start < end) {
3974 		if (entry == &map->header) {
3975 			return (false);
3976 		}
3977 
3978 		/*
3979 		 * no holes allowed
3980 		 */
3981 
3982 		if (start < entry->start) {
3983 			return (false);
3984 		}
3985 
3986 		/*
3987 		 * check protection associated with entry
3988 		 */
3989 
3990 		if ((entry->protection & protection) != protection) {
3991 			return (false);
3992 		}
3993 		start = entry->end;
3994 		entry = entry->next;
3995 	}
3996 	return (true);
3997 }
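
/*
 * Illustrative use (a hedged sketch, not an existing caller): verify
 * that [va, va + len) is fully mapped and readable, taking the read
 * lock around the check as the function requires.
 *
 *	bool ok;
 *
 *	vm_map_lock_read(map);
 *	ok = uvm_map_checkprot(map, trunc_page(va),
 *	    round_page(va + len), VM_PROT_READ);
 *	vm_map_unlock_read(map);
 *
 * the answer is only as stable as the map itself: it can go stale as
 * soon as the lock is dropped unless the caller otherwise pins the
 * range.
 */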
3998 
3999 /*
4000  * uvmspace_alloc: allocate a vmspace structure.
4001  *
4002  * - structure includes vm_map and pmap
4003  * - XXX: no locking on this structure
4004  * - refcnt set to 1, rest must be init'd by caller
4005  */
4006 struct vmspace *
4007 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
4008 {
4009 	struct vmspace *vm;
4010 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4011 
4012 	vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
4013 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
4014 	UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
4015 	return (vm);
4016 }
4017 
4018 /*
4019  * uvmspace_init: initialize a vmspace structure.
4020  *
4021  * - XXX: no locking on this structure
4022  * - refcnt set to 1, rest must be init'd by caller
4023  */
4024 void
4025 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
4026     vaddr_t vmax, bool topdown)
4027 {
4028 	UVMHIST_FUNC(__func__);
4029 	UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
4030 	    (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax);
4031 	UVMHIST_LOG(maphist, "   topdown=%ju)", topdown, 0, 0, 0);
4032 
4033 	memset(vm, 0, sizeof(*vm));
4034 	uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
4035 	    | (topdown ? VM_MAP_TOPDOWN : 0)
4036 	    );
4037 	if (pmap)
4038 		pmap_reference(pmap);
4039 	else
4040 		pmap = pmap_create();
4041 	vm->vm_map.pmap = pmap;
4042 	vm->vm_refcnt = 1;
4043 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4044 }
4045 
4046 /*
4047  * uvmspace_share: share a vmspace between two processes
4048  *
4049  * - used for vfork, threads(?)
4050  */
4051 
4052 void
4053 uvmspace_share(struct proc *p1, struct proc *p2)
4054 {
4055 
4056 	uvmspace_addref(p1->p_vmspace);
4057 	p2->p_vmspace = p1->p_vmspace;
4058 }
4059 
4060 #if 0
4061 
4062 /*
4063  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4064  *
4065  * - XXX: no locking on vmspace
4066  */
4067 
4068 void
4069 uvmspace_unshare(struct lwp *l)
4070 {
4071 	struct proc *p = l->l_proc;
4072 	struct vmspace *nvm, *ovm = p->p_vmspace;
4073 
4074 	if (ovm->vm_refcnt == 1)
4075 		/* nothing to do: vmspace isn't shared in the first place */
4076 		return;
4077 
4078 	/* make a new vmspace, still holding old one */
4079 	nvm = uvmspace_fork(ovm);
4080 
4081 	kpreempt_disable();
4082 	pmap_deactivate(l);		/* unbind old vmspace */
4083 	p->p_vmspace = nvm;
4084 	pmap_activate(l);		/* switch to new vmspace */
4085 	kpreempt_enable();
4086 
4087 	uvmspace_free(ovm);		/* drop reference to old vmspace */
4088 }
4089 
4090 #endif
4091 
4092 
4093 /*
4094  * uvmspace_spawn: a new process has been spawned and needs a vmspace
4095  */
4096 
4097 void
4098 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4099 {
4100 	struct proc *p = l->l_proc;
4101 	struct vmspace *nvm;
4102 
4103 #ifdef __HAVE_CPU_VMSPACE_EXEC
4104 	cpu_vmspace_exec(l, start, end);
4105 #endif
4106 
4107 	nvm = uvmspace_alloc(start, end, topdown);
4108 	kpreempt_disable();
4109 	p->p_vmspace = nvm;
4110 	pmap_activate(l);
4111 	kpreempt_enable();
4112 }
4113 
4114 /*
4115  * uvmspace_exec: the process wants to exec a new program
4116  */
4117 
4118 void
4119 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4120 {
4121 	struct proc *p = l->l_proc;
4122 	struct vmspace *nvm, *ovm = p->p_vmspace;
4123 	struct vm_map *map;
4124 	int flags;
4125 
4126 	KASSERT(ovm != NULL);
4127 #ifdef __HAVE_CPU_VMSPACE_EXEC
4128 	cpu_vmspace_exec(l, start, end);
4129 #endif
4130 
4131 	map = &ovm->vm_map;
4132 	/*
4133 	 * see if more than one process is using this vmspace...
4134 	 */
4135 
4136 	if (ovm->vm_refcnt == 1
4137 	    && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
4138 
4139 		/*
4140 		 * if p is the only process using its vmspace then we can safely
4141 		 * recycle that vmspace for the program that is being exec'd.
4142 		 * But only if TOPDOWN matches the requested value for the new
4143 		 * vm space!
4144 		 */
4145 
4146 		/*
4147 		 * SYSV SHM semantics require us to kill all segments on an exec
4148 		 */
4149 		if (uvm_shmexit && ovm->vm_shm)
4150 			(*uvm_shmexit)(ovm);
4151 
4152 		/*
4153 		 * POSIX 1003.1b -- "lock future mappings" is revoked
4154 		 * when a process execs another program image.
4155 		 */
4156 
4157 		map->flags &= ~VM_MAP_WIREFUTURE;
4158 
4159 		/*
4160 		 * now unmap the old program.
4161 		 *
4162 		 * XXX set VM_MAP_DYING for the duration, so pmap_update()
4163 		 * is not called until the pmap has been totally cleared out
4164 		 * after pmap_remove_all(), or it can confuse some pmap
4165 		 * implementations.  it would be nice to handle this by
4166 		 * deferring the pmap_update() while it is known the address
4167 		 * space is not visible to any user LWP other than curlwp,
4168 		 * but there isn't an elegant way of inferring that right
4169 		 * now.
4170 		 */
4171 
4172 		flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4173 		map->flags |= VM_MAP_DYING;
4174 		uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
4175 		map->flags &= ~VM_MAP_DYING;
4176 		pmap_update(map->pmap);
4177 		KASSERT(map->header.prev == &map->header);
4178 		KASSERT(map->nentries == 0);
4179 
4180 		/*
4181 		 * resize the map
4182 		 */
4183 
4184 		vm_map_setmin(map, start);
4185 		vm_map_setmax(map, end);
4186 	} else {
4187 
4188 		/*
4189 		 * p's vmspace is being shared, so we can't reuse it for p since
4190 		 * it is still being used for others.   allocate a new vmspace
4191 		 * for p
4192 		 */
4193 
4194 		nvm = uvmspace_alloc(start, end, topdown);
4195 
4196 		/*
4197 		 * install new vmspace and drop our ref to the old one.
4198 		 */
4199 
4200 		kpreempt_disable();
4201 		pmap_deactivate(l);
4202 		p->p_vmspace = nvm;
4203 		pmap_activate(l);
4204 		kpreempt_enable();
4205 
4206 		uvmspace_free(ovm);
4207 	}
4208 }
4209 
4210 /*
4211  * uvmspace_addref: add a reference to a vmspace.
4212  */
4213 
4214 void
4215 uvmspace_addref(struct vmspace *vm)
4216 {
4217 
4218 	KASSERT((vm->vm_map.flags & VM_MAP_DYING) == 0);
4219 	KASSERT(vm->vm_refcnt > 0);
4220 	atomic_inc_uint(&vm->vm_refcnt);
4221 }
4222 
4223 /*
4224  * uvmspace_free: free a vmspace data structure
4225  */
4226 
4227 void
4228 uvmspace_free(struct vmspace *vm)
4229 {
4230 	struct vm_map_entry *dead_entries;
4231 	struct vm_map *map = &vm->vm_map;
4232 	int flags;
4233 
4234 	UVMHIST_FUNC(__func__);
4235 	UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm,
4236 	    vm->vm_refcnt, 0, 0);
4237 	if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0)
4238 		return;
4239 
4240 	/*
4241 	 * at this point, there should be no other references to the map.
4242 	 * delete all of the mappings, then destroy the pmap.
4243 	 */
4244 
4245 	map->flags |= VM_MAP_DYING;
4246 	flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
4247 
4248 	/* Get rid of any SYSV shared memory segments. */
4249 	if (uvm_shmexit && vm->vm_shm != NULL)
4250 		(*uvm_shmexit)(vm);
4251 
4252 	if (map->nentries) {
4253 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4254 		    &dead_entries, flags);
4255 		if (dead_entries != NULL)
4256 			uvm_unmap_detach(dead_entries, 0);
4257 	}
4258 	KASSERT(map->nentries == 0);
4259 	KASSERT(map->size == 0);
4260 
4261 	mutex_destroy(&map->misc_lock);
4262 	rw_destroy(&map->lock);
4263 	cv_destroy(&map->cv);
4264 	pmap_destroy(map->pmap);
4265 	pool_cache_put(&uvm_vmspace_cache, vm);
4266 }
4267 
4268 static struct vm_map_entry *
4269 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4270     int flags)
4271 {
4272 	struct vm_map_entry *new_entry;
4273 
4274 	new_entry = uvm_mapent_alloc(new_map, 0);
4275 	/* old_entry -> new_entry */
4276 	uvm_mapent_copy(old_entry, new_entry);
4277 
4278 	/* new pmap has nothing wired in it */
4279 	new_entry->wired_count = 0;
4280 
4281 	/*
4282 	 * gain reference to object backing the map (can't
4283 	 * be a submap, already checked this case).
4284 	 */
4285 
4286 	if (new_entry->aref.ar_amap)
4287 		uvm_map_reference_amap(new_entry, flags);
4288 
4289 	if (new_entry->object.uvm_obj &&
4290 	    new_entry->object.uvm_obj->pgops->pgo_reference)
4291 		new_entry->object.uvm_obj->pgops->pgo_reference(
4292 			new_entry->object.uvm_obj);
4293 
4294 	/* insert entry at end of new_map's entry list */
4295 	uvm_map_entry_link(new_map, new_map->header.prev,
4296 	    new_entry);
4297 
4298 	return new_entry;
4299 }
4300 
4301 /*
4302  * share the mapping: this means we want the old and
4303  * new entries to share amaps and backing objects.
4304  */
4305 static void
4306 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
4307     struct vm_map_entry *old_entry)
4308 {
4309 	/*
4310 	 * if the old_entry needs a new amap (due to a previous fork)
4311 	 * then we need to allocate it now so that we have
4312 	 * something we own to share with the new_entry.   [in
4313 	 * other words, we need to clear needs_copy]
4314 	 */
4315 
4316 	if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4317 		/* get our own amap, clears needs_copy */
4318 		amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4319 		    0, 0);
4320 		/* XXXCDC: WAITOK??? */
4321 	}
4322 
4323 	uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
4324 }
4325 
4326 
4327 static void
4328 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
4329     struct vm_map_entry *old_entry)
4330 {
4331 	struct vm_map_entry *new_entry;
4332 
4333 	/*
4334 	 * copy-on-write the mapping (using mmap's
4335 	 * MAP_PRIVATE semantics)
4336 	 *
4337 	 * allocate new_entry, adjust reference counts.
4338 	 * (note that new references are read-only).
4339 	 */
4340 
4341 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4342 
4343 	new_entry->etype |=
4344 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4345 
4346 	/*
4347 	 * the new entry will need an amap.  it will either
4348 	 * need to be copied from the old entry or created
4349 	 * from scratch (if the old entry does not have an
4350 	 * amap).  can we defer this process until later
4351 	 * (by setting "needs_copy") or do we need to copy
4352 	 * the amap now?
4353 	 *
4354 	 * we must copy the amap now if any of the following
4355 	 * conditions hold:
4356 	 * 1. the old entry has an amap and that amap is
4357 	 *    being shared.  this means that the old (parent)
4358 	 *    process is sharing the amap with another
4359 	 *    process.  if we do not clear needs_copy here
4360 	 *    we will end up in a situation where both the
4361 	 *    parent and child process are referring to the
4362 	 *    same amap with "needs_copy" set.  if the
4363 	 *    parent write-faults, the fault routine will
4364 	 *    clear "needs_copy" in the parent by allocating
4365 	 *    a new amap.   this is wrong because the
4366 	 *    parent is supposed to be sharing the old amap
4367 	 *    and the new amap will break that.
4368 	 *
4369 	 * 2. if the old entry has an amap and a non-zero
4370 	 *    wire count then we are going to have to call
4371 	 *    amap_cow_now to avoid page faults in the
4372 	 *    parent process.   since amap_cow_now requires
4373 	 *    "needs_copy" to be clear we might as well
4374 	 *    clear it here as well.
4375 	 *
4376 	 */
4377 
4378 	if (old_entry->aref.ar_amap != NULL) {
4379 		if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4380 		    VM_MAPENT_ISWIRED(old_entry)) {
4381 
4382 			amap_copy(new_map, new_entry,
4383 			    AMAP_COPY_NOCHUNK, 0, 0);
4384 			/* XXXCDC: M_WAITOK ... ok? */
4385 		}
4386 	}
4387 
4388 	/*
4389 	 * if the parent's entry is wired down, then the
4390 	 * parent process does not want page faults on
4391 	 * access to that memory.  this means that we
4392 	 * cannot do copy-on-write because we can't write
4393 	 * protect the old entry.   in this case we
4394 	 * resolve all copy-on-write faults now, using
4395 	 * amap_cow_now.   note that we have already
4396 	 * allocated any needed amap (above).
4397 	 */
4398 
4399 	if (VM_MAPENT_ISWIRED(old_entry)) {
4400 
4401 		/*
4402 		 * resolve all copy-on-write faults now
4403 		 * (note that there is nothing to do if
4404 		 * the old mapping does not have an amap).
4405 		 */
4406 		if (old_entry->aref.ar_amap)
4407 			amap_cow_now(new_map, new_entry);
4408 
4409 	} else {
4410 		/*
4411 		 * setup mappings to trigger copy-on-write faults
4412 		 * we must write-protect the parent if it has
4413 		 * an amap and it is not already "needs_copy"...
4414 		 * if it is already "needs_copy" then the parent
4415 		 * has already been write-protected by a previous
4416 		 * fork operation.
4417 		 */
4418 		if (old_entry->aref.ar_amap &&
4419 		    !UVM_ET_ISNEEDSCOPY(old_entry)) {
4420 			if (old_entry->max_protection & VM_PROT_WRITE) {
4421 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
4422 				uvm_map_lock_entry(old_entry, RW_WRITER);
4423 #else
4424 				uvm_map_lock_entry(old_entry, RW_READER);
4425 #endif
4426 				pmap_protect(old_map->pmap,
4427 				    old_entry->start, old_entry->end,
4428 				    old_entry->protection & ~VM_PROT_WRITE);
4429 				uvm_map_unlock_entry(old_entry);
4430 			}
4431 			old_entry->etype |= UVM_ET_NEEDSCOPY;
4432 		}
4433 	}
4434 }
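
/*
 * A compact, informal restatement of the copy-vs-defer decision made
 * above (for orientation only; the comments above are authoritative):
 *
 *	parent amap?	shared or wired?	action at fork time
 *	------------	----------------	---------------------------
 *	no		-			defer; amap created at the
 *						first fault (needs_copy)
 *	yes		no			defer via needs_copy, after
 *						write-protecting the parent
 *	yes		yes			amap_copy() now; if wired,
 *						amap_cow_now() as well
 */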
4435 
4436 /*
4437  * zero the mapping: the new entry will be zero-initialized
4438  */
4439 static void
4440 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
4441     struct vm_map_entry *old_entry)
4442 {
4443 	struct vm_map_entry *new_entry;
4444 
4445 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4446 
4447 	new_entry->etype |=
4448 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4449 
4450 	if (new_entry->aref.ar_amap) {
4451 		uvm_map_unreference_amap(new_entry, 0);
4452 		new_entry->aref.ar_pageoff = 0;
4453 		new_entry->aref.ar_amap = NULL;
4454 	}
4455 
4456 	if (UVM_ET_ISOBJ(new_entry)) {
4457 		if (new_entry->object.uvm_obj->pgops->pgo_detach)
4458 			new_entry->object.uvm_obj->pgops->pgo_detach(
4459 			    new_entry->object.uvm_obj);
4460 		new_entry->object.uvm_obj = NULL;
4461 		new_entry->etype &= ~UVM_ET_OBJ;
4462 	}
4463 }
4464 
4465 /*
4466  *   F O R K   -   m a i n   e n t r y   p o i n t
4467  */
4468 /*
4469  * uvmspace_fork: fork a process' main map
4470  *
4471  * => create a new vmspace for child process from parent.
4472  * => parent's map must not be locked.
4473  */
4474 
4475 struct vmspace *
4476 uvmspace_fork(struct vmspace *vm1)
4477 {
4478 	struct vmspace *vm2;
4479 	struct vm_map *old_map = &vm1->vm_map;
4480 	struct vm_map *new_map;
4481 	struct vm_map_entry *old_entry;
4482 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4483 
4484 	vm_map_lock(old_map);
4485 
4486 	vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4487 	    vm1->vm_map.flags & VM_MAP_TOPDOWN);
4488 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4489 	    (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4490 	new_map = &vm2->vm_map;		  /* XXX */
4491 
4492 	old_entry = old_map->header.next;
4493 	new_map->size = old_map->size;
4494 
4495 	/*
4496 	 * go entry-by-entry
4497 	 */
4498 
4499 	while (old_entry != &old_map->header) {
4500 
4501 		/*
4502 		 * first, some sanity checks on the old entry
4503 		 */
4504 
4505 		KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4506 		KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4507 			!UVM_ET_ISNEEDSCOPY(old_entry));
4508 
4509 		switch (old_entry->inheritance) {
4510 		case MAP_INHERIT_NONE:
4511 			/*
4512 			 * drop the mapping, modify size
4513 			 */
4514 			new_map->size -= old_entry->end - old_entry->start;
4515 			break;
4516 
4517 		case MAP_INHERIT_SHARE:
4518 			uvm_mapent_forkshared(new_map, old_map, old_entry);
4519 			break;
4520 
4521 		case MAP_INHERIT_COPY:
4522 			uvm_mapent_forkcopy(new_map, old_map, old_entry);
4523 			break;
4524 
4525 		case MAP_INHERIT_ZERO:
4526 			uvm_mapent_forkzero(new_map, old_map, old_entry);
4527 			break;
4528 		default:
4529 			KASSERT(0);
4530 			break;
4531 		}
4532 		old_entry = old_entry->next;
4533 	}
4534 
4535 	pmap_update(old_map->pmap);
4536 	vm_map_unlock(old_map);
4537 
4538 	if (uvm_shmfork && vm1->vm_shm)
4539 		(*uvm_shmfork)(vm1, vm2);
4540 
4541 #ifdef PMAP_FORK
4542 	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4543 #endif
4544 
4545 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4546 	return (vm2);
4547 }
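
/*
 * Illustrative caller (a simplified sketch of the uvm_proc_fork()
 * logic in uvm_glue.c): fork1() either shares the parent's vmspace
 * (vfork) or copies it entry by entry via the function above.
 *
 *	if (shared)
 *		uvmspace_share(p1, p2);
 *	else
 *		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
 */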
4548 
4549 
4550 /*
4551  * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4552  *
4553  * => called with map locked.
4554  * => returns non-zero if successfully merged.
4555  */
4556 
4557 int
4558 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4559 {
4560 	struct uvm_object *uobj;
4561 	struct vm_map_entry *next;
4562 	struct vm_map_entry *prev;
4563 	vsize_t size;
4564 	int merged = 0;
4565 	bool copying;
4566 	int newetype;
4567 
4568 	if (entry->aref.ar_amap != NULL) {
4569 		return 0;
4570 	}
4571 	if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4572 		return 0;
4573 	}
4574 
4575 	uobj = entry->object.uvm_obj;
4576 	size = entry->end - entry->start;
4577 	copying = (flags & UVM_MERGE_COPYING) != 0;
4578 	newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4579 
4580 	next = entry->next;
4581 	if (next != &map->header &&
4582 	    next->start == entry->end &&
4583 	    ((copying && next->aref.ar_amap != NULL &&
4584 	    amap_refs(next->aref.ar_amap) == 1) ||
4585 	    (!copying && next->aref.ar_amap == NULL)) &&
4586 	    UVM_ET_ISCOMPATIBLE(next, newetype,
4587 	    uobj, entry->flags, entry->protection,
4588 	    entry->max_protection, entry->inheritance, entry->advice,
4589 	    entry->wired_count) &&
4590 	    (uobj == NULL || entry->offset + size == next->offset)) {
4591 		int error;
4592 
4593 		if (copying) {
4594 			error = amap_extend(next, size,
4595 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4596 		} else {
4597 			error = 0;
4598 		}
4599 		if (error == 0) {
4600 			if (uobj) {
4601 				if (uobj->pgops->pgo_detach) {
4602 					uobj->pgops->pgo_detach(uobj);
4603 				}
4604 			}
4605 
4606 			entry->end = next->end;
4607 			clear_hints(map, next);
4608 			uvm_map_entry_unlink(map, next);
4609 			if (copying) {
4610 				entry->aref = next->aref;
4611 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4612 			}
4613 			uvm_map_check(map, "trymerge forwardmerge");
4614 			uvm_mapent_free(next);
4615 			merged++;
4616 		}
4617 	}
4618 
4619 	prev = entry->prev;
4620 	if (prev != &map->header &&
4621 	    prev->end == entry->start &&
4622 	    ((copying && !merged && prev->aref.ar_amap != NULL &&
4623 	    amap_refs(prev->aref.ar_amap) == 1) ||
4624 	    (!copying && prev->aref.ar_amap == NULL)) &&
4625 	    UVM_ET_ISCOMPATIBLE(prev, newetype,
4626 	    uobj, entry->flags, entry->protection,
4627 	    entry->max_protection, entry->inheritance, entry->advice,
4628 	    entry->wired_count) &&
4629 	    (uobj == NULL ||
4630 	    prev->offset + prev->end - prev->start == entry->offset)) {
4631 		int error;
4632 
4633 		if (copying) {
4634 			error = amap_extend(prev, size,
4635 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4636 		} else {
4637 			error = 0;
4638 		}
4639 		if (error == 0) {
4640 			if (uobj) {
4641 				if (uobj->pgops->pgo_detach) {
4642 					uobj->pgops->pgo_detach(uobj);
4643 				}
4644 				entry->offset = prev->offset;
4645 			}
4646 
4647 			entry->start = prev->start;
4648 			clear_hints(map, prev);
4649 			uvm_map_entry_unlink(map, prev);
4650 			if (copying) {
4651 				entry->aref = prev->aref;
4652 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4653 			}
4654 			uvm_map_check(map, "trymerge backmerge");
4655 			uvm_mapent_free(prev);
4656 			merged++;
4657 		}
4658 	}
4659 
4660 	return merged;
4661 }
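
/*
 * the forward-merge case above, pictorially (informal):
 *
 *	before:	[ entry )[ next )	entry->end == next->start,
 *					compatible etype/prot/etc, and
 *					contiguous uobj offsets
 *	after:	[ entry.........)	next unlinked and freed
 *
 * the backward case is symmetric, with entry absorbing prev instead.
 */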
4662 
4663 /*
4664  * uvm_map_setup: init map
4665  *
4666  * => map must not be in service yet.
4667  */
4668 
4669 void
4670 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4671 {
4672 
4673 	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4674 	map->header.next = map->header.prev = &map->header;
4675 	map->nentries = 0;
4676 	map->size = 0;
4677 	map->ref_count = 1;
4678 	vm_map_setmin(map, vmin);
4679 	vm_map_setmax(map, vmax);
4680 	map->flags = flags;
4681 	map->first_free = &map->header;
4682 	map->hint = &map->header;
4683 	map->timestamp = 0;
4684 	map->busy = NULL;
4685 
4686 	rw_init(&map->lock);
4687 	cv_init(&map->cv, "vm_map");
4688 	mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4689 }
4690 
4691 /*
4692  *   U N M A P   -   m a i n   e n t r y   p o i n t
4693  */
4694 
4695 /*
4696  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop")
4697  *
4698  * => caller must check alignment and size
4699  * => map must be unlocked (we will lock it)
4700  * => flags is UVM_FLAG_QUANTUM or 0.
4701  */
4702 
4703 void
4704 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4705 {
4706 	struct vm_map_entry *dead_entries;
4707 	UVMHIST_FUNC(__func__);
4708 	UVMHIST_CALLARGS(maphist, "  (map=%#jx, start=%#jx, end=%#jx)",
4709 	    (uintptr_t)map, start, end, 0);
4710 
4711 	KASSERTMSG(start < end,
4712 	    "%s: map %p: start %#jx < end %#jx", __func__, map,
4713 	    (uintmax_t)start, (uintmax_t)end);
4714 	if (map == kernel_map) {
4715 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4716 	}
4717 
4718 	/*
4719 	 * work now done by helper functions.   wipe the pmap's and then
4720 	 * detach from the dead entries...
4721 	 */
4722 	vm_map_lock(map);
4723 	uvm_unmap_remove(map, start, end, &dead_entries, flags);
4724 	vm_map_unlock(map);
4725 
4726 	if (dead_entries != NULL)
4727 		uvm_unmap_detach(dead_entries, 0);
4728 
4729 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4730 }
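
/*
 * most callers go through the plain form (believed to be provided as a
 * convenience macro in uvm_map.h, expanding to uvm_unmap1() with
 * flags == 0):
 *
 *	uvm_unmap(map, start, end);
 */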
4731 
4732 
4733 /*
4734  * uvm_map_reference: add reference to a map
4735  *
4736  * => map need not be locked
4737  */
4738 
4739 void
4740 uvm_map_reference(struct vm_map *map)
4741 {
4742 
4743 	atomic_inc_uint(&map->ref_count);
4744 }
4745 
4746 void
4747 uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
4748 {
4749 
4750 	if (entry->aref.ar_amap != NULL) {
4751 		amap_lock(entry->aref.ar_amap, op);
4752 	}
4753 	if (UVM_ET_ISOBJ(entry)) {
4754 		rw_enter(entry->object.uvm_obj->vmobjlock, op);
4755 	}
4756 }
4757 
4758 void
4759 uvm_map_unlock_entry(struct vm_map_entry *entry)
4760 {
4761 
4762 	if (UVM_ET_ISOBJ(entry)) {
4763 		rw_exit(entry->object.uvm_obj->vmobjlock);
4764 	}
4765 	if (entry->aref.ar_amap != NULL) {
4766 		amap_unlock(entry->aref.ar_amap);
4767 	}
4768 }
4769 
4770 #define	UVM_VOADDR_TYPE_MASK	0x3UL
4771 #define	UVM_VOADDR_TYPE_UOBJ	0x1UL
4772 #define	UVM_VOADDR_TYPE_ANON	0x2UL
4773 #define	UVM_VOADDR_OBJECT_MASK	~UVM_VOADDR_TYPE_MASK
4774 
4775 #define	UVM_VOADDR_GET_TYPE(voa)					\
4776 	((voa)->object & UVM_VOADDR_TYPE_MASK)
4777 #define	UVM_VOADDR_GET_OBJECT(voa)					\
4778 	((voa)->object & UVM_VOADDR_OBJECT_MASK)
4779 #define	UVM_VOADDR_SET_OBJECT(voa, obj, type)				\
4780 do {									\
4781 	KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0);	\
4782 	(voa)->object = ((uintptr_t)(obj)) | (type);			\
4783 } while (/*CONSTCOND*/0)
4784 
4785 #define	UVM_VOADDR_GET_UOBJ(voa)					\
4786 	((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa))
4787 #define	UVM_VOADDR_SET_UOBJ(voa, uobj)					\
4788 	UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ)
4789 
4790 #define	UVM_VOADDR_GET_ANON(voa)					\
4791 	((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa))
4792 #define	UVM_VOADDR_SET_ANON(voa, anon)					\
4793 	UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
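
/*
 * worked example (illustrative address): storing a vm_anon at
 * 0xffffd8812a3c0040 yields voa->object == 0xffffd8812a3c0042, i.e.
 * the pointer with UVM_VOADDR_TYPE_ANON in its two low bits;
 * UVM_VOADDR_GET_OBJECT() masks the type back off.  this encoding is
 * safe because both object types are at least 4-byte aligned, which
 * UVM_VOADDR_SET_OBJECT() asserts.
 */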
4794 
4795 /*
4796  * uvm_voaddr_acquire: returns the virtual object address corresponding
4797  * to the specified virtual address.
4798  *
4799  * => resolves COW so the true page identity is tracked.
4800  *
4801  * => acquires a reference on the page's owner (uvm_object or vm_anon)
4802  */
4803 bool
4804 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
4805     struct uvm_voaddr * const voaddr)
4806 {
4807 	struct vm_map_entry *entry;
4808 	struct vm_anon *anon = NULL;
4809 	bool result = false;
4810 	bool exclusive = false;
4811 	void (*unlock_fn)(struct vm_map *);
4812 
4813 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4814 	UVMHIST_LOG(maphist,"(map=%#jx,va=%jx)", (uintptr_t)map, va, 0, 0);
4815 
4816 	const vaddr_t start = trunc_page(va);
4817 	const vaddr_t end = round_page(va+1);
4818 
4819  lookup_again:
4820 	if (__predict_false(exclusive)) {
4821 		vm_map_lock(map);
4822 		unlock_fn = vm_map_unlock;
4823 	} else {
4824 		vm_map_lock_read(map);
4825 		unlock_fn = vm_map_unlock_read;
4826 	}
4827 
4828 	if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
4829 		unlock_fn(map);
4830 		UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
4831 		return false;
4832 	}
4833 
4834 	if (__predict_false(entry->protection == VM_PROT_NONE)) {
4835 		unlock_fn(map);
4836 		UVMHIST_LOG(maphist,"<- done (PROT_NONE)",0,0,0,0);
4837 		return false;
4838 	}
4839 
4840 	/*
4841 	 * We have a fast path for the common case of "no COW resolution
4842 	 * needed" whereby we have taken a read lock on the map and if
4843 	 * we don't encounter any need to create a vm_anon then great!
4844 	 * But if we do, we loop around again, instead taking an exclusive
4845 	 * lock so that we can perform the fault.
4846 	 *
4847 	 * In the event that we have to resolve the fault, we do nearly the
4848 	 * same work as uvm_map_pageable() does:
4849 	 *
4850 	 * 1: holding the write lock, we create any anonymous maps that need
4851 	 *    to be created.  however, we do NOT need to clip the map entries
4852 	 *    in this case.
4853 	 *
4854 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
4855 	 *    in the page (assuming the entry is not already wired).  this
4856 	 *    is done because we need the vm_anon to be present.
4857 	 */
4858 	if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
4859 
4860 		bool need_fault = false;
4861 
4862 		/*
4863 		 * perform the actions of vm_map_lookup that need the
4864 		 * write lock on the map: create an anonymous map for
4865 		 * a copy-on-write region, or an anonymous map for
4866 		 * a zero-fill region.
4867 		 */
4868 		if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
4869 			unlock_fn(map);
4870 			UVMHIST_LOG(maphist,"<- done (submap)",0,0,0,0);
4871 			return false;
4872 		}
4873 		if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
4874 		    ((entry->max_protection & VM_PROT_WRITE) ||
4875 		     (entry->object.uvm_obj == NULL)))) {
4876 			if (!exclusive) {
4877 				/* need to take the slow path */
4878 				KASSERT(unlock_fn == vm_map_unlock_read);
4879 				vm_map_unlock_read(map);
4880 				exclusive = true;
4881 				goto lookup_again;
4882 			}
4883 			need_fault = true;
4884 			amap_copy(map, entry, 0, start, end);
4885 			/* XXXCDC: wait OK? */
4886 		}
4887 
4888 		/*
4889 		 * do a quick check to see if the fault has already
4890 		 * been resolved to the upper layer.
4891 		 */
4892 		if (__predict_true(entry->aref.ar_amap != NULL &&
4893 				   need_fault == false)) {
4894 			amap_lock(entry->aref.ar_amap, RW_WRITER);
4895 			anon = amap_lookup(&entry->aref, start - entry->start);
4896 			if (__predict_true(anon != NULL)) {
4897 				/* amap unlocked below */
4898 				goto found_anon;
4899 			}
4900 			amap_unlock(entry->aref.ar_amap);
4901 			need_fault = true;
4902 		}
4903 
4904 		/*
4905 		 * we predict this test as false because if we reach
4906 		 * this point, then we are likely dealing with a
4907 		 * shared memory region backed by a uvm_object, in
4908 		 * which case a fault to create the vm_anon is not
4909 		 * necessary.
4910 		 */
4911 		if (__predict_false(need_fault)) {
4912 			if (exclusive) {
4913 				vm_map_busy(map);
4914 				vm_map_unlock(map);
4915 				unlock_fn = vm_map_unbusy;
4916 			}
4917 
4918 			if (uvm_fault_wire(map, start, end,
4919 					   entry->max_protection, 1)) {
4920 				/* wiring failed */
4921 				unlock_fn(map);
4922 				UVMHIST_LOG(maphist,"<- done (wire failed)",
4923 					    0,0,0,0);
4924 				return false;
4925 			}
4926 
4927 			/*
4928 			 * now that we have resolved the fault, we can unwire
4929 			 * the page.
4930 			 */
4931 			if (exclusive) {
4932 				vm_map_lock(map);
4933 				vm_map_unbusy(map);
4934 				unlock_fn = vm_map_unlock;
4935 			}
4936 
4937 			uvm_fault_unwire_locked(map, start, end);
4938 		}
4939 	}
4940 
4941 	/* check the upper layer */
4942 	if (entry->aref.ar_amap) {
4943 		amap_lock(entry->aref.ar_amap, RW_WRITER);
4944 		anon = amap_lookup(&entry->aref, start - entry->start);
4945 		if (anon) {
4946  found_anon:		KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
4947 			anon->an_ref++;
4948 			rw_obj_hold(anon->an_lock);
4949 			KASSERT(anon->an_ref != 0);
4950 			UVM_VOADDR_SET_ANON(voaddr, anon);
4951 			voaddr->offset = va & PAGE_MASK;
4952 			result = true;
4953 		}
4954 		amap_unlock(entry->aref.ar_amap);
4955 	}
4956 
4957 	/* check the lower layer */
4958 	if (!result && UVM_ET_ISOBJ(entry)) {
4959 		struct uvm_object *uobj = entry->object.uvm_obj;
4960 
4961 		KASSERT(uobj != NULL);
4962 		(*uobj->pgops->pgo_reference)(uobj);
4963 		UVM_VOADDR_SET_UOBJ(voaddr, uobj);
4964 		voaddr->offset = entry->offset + (va - entry->start);
4965 		result = true;
4966 	}
4967 
4968 	unlock_fn(map);
4969 
4970 	if (result) {
4971 		UVMHIST_LOG(maphist,
4972 		    "<- done OK (type=%jd,owner=#%jx,offset=%jx)",
4973 		    UVM_VOADDR_GET_TYPE(voaddr),
4974 		    UVM_VOADDR_GET_OBJECT(voaddr),
4975 		    voaddr->offset, 0);
4976 	} else {
4977 		UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
4978 	}
4979 
4980 	return result;
4981 }
4982 
4983 /*
4984  * uvm_voaddr_release: release the references held by the
4985  * virtual object address.
4986  */
4987 void
4988 uvm_voaddr_release(struct uvm_voaddr * const voaddr)
4989 {
4990 
4991 	switch (UVM_VOADDR_GET_TYPE(voaddr)) {
4992 	case UVM_VOADDR_TYPE_UOBJ: {
4993 		struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr);
4994 
4995 		KASSERT(uobj != NULL);
4996 		KASSERT(uobj->pgops->pgo_detach != NULL);
4997 		(*uobj->pgops->pgo_detach)(uobj);
4998 		break;
4999 	    }
5000 	case UVM_VOADDR_TYPE_ANON: {
5001 		struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr);
5002 		krwlock_t *lock;
5003 
5004 		KASSERT(anon != NULL);
5005 		rw_enter((lock = anon->an_lock), RW_WRITER);
5006 	    	KASSERT(anon->an_ref > 0);
5007 		if (--anon->an_ref == 0) {
5008 			uvm_anfree(anon);
5009 		}
5010 		rw_exit(lock);
5011 		rw_obj_free(lock);
5012 	    	break;
5013 	    }
5014 	default:
5015 		panic("uvm_voaddr_release: bad type");
5016 	}
5017 	memset(voaddr, 0, sizeof(*voaddr));
5018 }
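
/*
 * typical usage pattern (a hedged sketch; in-tree, virtual object
 * addresses are used to stably identify memory shared across address
 * spaces, e.g. by the futex code):
 *
 *	struct uvm_voaddr va;
 *
 *	if (!uvm_voaddr_acquire(&p->p_vmspace->vm_map, uaddr, &va))
 *		return EFAULT;
 *	... use va as a stable key for the backing object + offset ...
 *	uvm_voaddr_release(&va);
 */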
5019 
5020 /*
5021  * uvm_voaddr_compare: compare two uvm_voaddr objects.
5022  *
5023  * => memcmp() semantics
5024  */
5025 int
5026 uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1,
5027     const struct uvm_voaddr * const voaddr2)
5028 {
5029 	const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1);
5030 	const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2);
5031 
5032 	KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ ||
5033 		type1 == UVM_VOADDR_TYPE_ANON);
5034 
5035 	KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ ||
5036 		type2 == UVM_VOADDR_TYPE_ANON);
5037 
5038 	if (type1 < type2)
5039 		return -1;
5040 	if (type1 > type2)
5041 		return 1;
5042 
5043 	const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1);
5044 	const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2);
5045 
5046 	if (addr1 < addr2)
5047 		return -1;
5048 	if (addr1 > addr2)
5049 		return 1;
5050 
5051 	if (voaddr1->offset < voaddr2->offset)
5052 		return -1;
5053 	if (voaddr1->offset > voaddr2->offset)
5054 		return 1;
5055 
5056 	return 0;
5057 }
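
/*
 * the memcmp() semantics make this directly usable as an ordering
 * callback.  a hypothetical rb_tree keyed on virtual object addresses
 * might compare nodes as below ("struct fwaiter" and its fw_voaddr
 * member are invented for illustration):
 *
 *	static signed int
 *	voaddr_compare_nodes(void *ctx, const void *n1, const void *n2)
 *	{
 *		const struct fwaiter *a = n1, *b = n2;
 *
 *		return uvm_voaddr_compare(&a->fw_voaddr, &b->fw_voaddr);
 *	}
 */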
5058 
5059 #if defined(DDB) || defined(DEBUGPRINT)
5060 
5061 /*
5062  * uvm_map_printit: actually prints the map
5063  */
5064 
5065 void
5066 uvm_map_printit(struct vm_map *map, bool full,
5067     void (*pr)(const char *, ...))
5068 {
5069 	struct vm_map_entry *entry;
5070 
5071 	(*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
5072 	    vm_map_max(map));
5073 	(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
5074 	    map->nentries, map->size, map->ref_count, map->timestamp,
5075 	    map->flags);
5076 	(*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
5077 	    pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
5078 	if (!full)
5079 		return;
5080 	for (entry = map->header.next; entry != &map->header;
5081 	    entry = entry->next) {
5082 		(*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
5083 		    entry, entry->start, entry->end, entry->object.uvm_obj,
5084 		    (long long)entry->offset, entry->aref.ar_amap,
5085 		    entry->aref.ar_pageoff);
5086 		(*pr)(
5087 		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
5088 		    "wc=%d, adv=%d\n",
5089 		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
5090 		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
5091 		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
5092 		    entry->protection, entry->max_protection,
5093 		    entry->inheritance, entry->wired_count, entry->advice);
5094 	}
5095 }
5096 
5097 void
5098 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5099 {
5100 	struct vm_map *map;
5101 
5102 	for (map = kernel_map;;) {
5103 		struct vm_map_entry *entry;
5104 
5105 		if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5106 			break;
5107 		}
5108 		(*pr)("%p is %p+%zu from VMMAP %p\n",
5109 		    (void *)addr, (void *)entry->start,
5110 		    (size_t)(addr - (uintptr_t)entry->start), map);
5111 		if (!UVM_ET_ISSUBMAP(entry)) {
5112 			break;
5113 		}
5114 		map = entry->object.sub_map;
5115 	}
5116 }
5117 
5118 #endif /* DDB || DEBUGPRINT */
5119 
5120 #ifndef __USER_VA0_IS_SAFE
5121 static int
5122 sysctl_user_va0_disable(SYSCTLFN_ARGS)
5123 {
5124 	struct sysctlnode node;
5125 	int t, error;
5126 
5127 	node = *rnode;
5128 	node.sysctl_data = &t;
5129 	t = user_va0_disable;
5130 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
5131 	if (error || newp == NULL)
5132 		return (error);
5133 
5134 	if (!t && user_va0_disable &&
5135 	    kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
5136 	    NULL, NULL, NULL))
5137 		return EPERM;
5138 
5139 	user_va0_disable = !!t;
5140 	return 0;
5141 }
5142 #endif
5143 
5144 static int
5145 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
5146     struct vm_map *m, struct vm_map_entry *e)
5147 {
5148 #ifndef _RUMPKERNEL
5149 	int error;
5150 
5151 	memset(kve, 0, sizeof(*kve));
5152 	KASSERT(e != NULL);
5153 	if (UVM_ET_ISOBJ(e)) {
5154 		struct uvm_object *uobj = e->object.uvm_obj;
5155 		KASSERT(uobj != NULL);
5156 		kve->kve_ref_count = uobj->uo_refs;
5157 		kve->kve_count = uobj->uo_npages;
5158 		if (UVM_OBJ_IS_VNODE(uobj)) {
5159 			struct vattr va;
5160 			struct vnode *vp = (struct vnode *)uobj;
5161 			vn_lock(vp, LK_SHARED | LK_RETRY);
5162 			error = VOP_GETATTR(vp, &va, l->l_cred);
5163 			VOP_UNLOCK(vp);
5164 			kve->kve_type = KVME_TYPE_VNODE;
5165 			if (error == 0) {
5166 				kve->kve_vn_size = vp->v_size;
5167 				kve->kve_vn_type = (int)vp->v_type;
5168 				kve->kve_vn_mode = va.va_mode;
5169 				kve->kve_vn_rdev = va.va_rdev;
5170 				kve->kve_vn_fileid = va.va_fileid;
5171 				kve->kve_vn_fsid = va.va_fsid;
5172 				error = vnode_to_path(kve->kve_path,
5173 				    sizeof(kve->kve_path) / 2, vp, l, p);
5174 #ifdef DIAGNOSTIC
5175 				if (error)
5176 					printf("%s: vp %p error %d\n", __func__,
5177 						vp, error);
5178 #endif
5179 			}
5180 		} else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
5181 			kve->kve_type = KVME_TYPE_KERN;
5182 		} else if (UVM_OBJ_IS_DEVICE(uobj)) {
5183 			kve->kve_type = KVME_TYPE_DEVICE;
5184 		} else if (UVM_OBJ_IS_AOBJ(uobj)) {
5185 			kve->kve_type = KVME_TYPE_ANON;
5186 		} else {
5187 			kve->kve_type = KVME_TYPE_OBJECT;
5188 		}
5189 	} else if (UVM_ET_ISSUBMAP(e)) {
5190 		struct vm_map *map = e->object.sub_map;
5191 		KASSERT(map != NULL);
5192 		kve->kve_ref_count = map->ref_count;
5193 		kve->kve_count = map->nentries;
5194 		kve->kve_type = KVME_TYPE_SUBMAP;
5195 	} else
5196 		kve->kve_type = KVME_TYPE_UNKNOWN;
5197 
5198 	kve->kve_start = e->start;
5199 	kve->kve_end = e->end;
5200 	kve->kve_offset = e->offset;
5201 	kve->kve_wired_count = e->wired_count;
5202 	kve->kve_inheritance = e->inheritance;
5203 	kve->kve_attributes = 0; /* unused */
5204 	kve->kve_advice = e->advice;
5205 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
5206 	(((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
5207 	(((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
5208 	kve->kve_protection = PROT(e->protection);
5209 	kve->kve_max_protection = PROT(e->max_protection);
5210 	kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
5211 	    ? KVME_FLAG_COW : 0;
5212 	kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
5213 	    ? KVME_FLAG_NEEDS_COPY : 0;
5214 	kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
5215 	    ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
5216 	kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
5217 	    ? KVME_FLAG_PAGEABLE : 0;
5218 #endif
5219 	return 0;
5220 }
5221 
5222 static int
5223 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
5224     size_t *oldlenp)
5225 {
5226 	int error;
5227 	struct proc *p;
5228 	struct kinfo_vmentry *vme;
5229 	struct vmspace *vm;
5230 	struct vm_map *map;
5231 	struct vm_map_entry *entry;
5232 	char *dp;
5233 	size_t count, vmesize;
5234 
5235 	if (elem_size == 0 || elem_size > 2 * sizeof(*vme))
5236 		return EINVAL;
5237 
5238 	if (oldp) {
5239 		if (*oldlenp > 10UL * 1024UL * 1024UL)
5240 			return E2BIG;
5241 		count = *oldlenp / elem_size;
5242 		if (count == 0)
5243 			return ENOMEM;
5244 		vmesize = count * sizeof(*vme);
5245 	} else
5246 		vmesize = 0;
5247 
5248 	if ((error = proc_find_locked(l, &p, pid)) != 0)
5249 		return error;
5250 
5251 	vme = NULL;
5252 	count = 0;
5253 
5254 	if ((error = proc_vmspace_getref(p, &vm)) != 0)
5255 		goto out;
5256 
5257 	map = &vm->vm_map;
5258 	vm_map_lock_read(map);
5259 
5260 	dp = oldp;
5261 	if (oldp)
5262 		vme = kmem_alloc(vmesize, KM_SLEEP);
5263 	for (entry = map->header.next; entry != &map->header;
5264 	    entry = entry->next) {
5265 		if (oldp && (dp - (char *)oldp) < vmesize) {
5266 			error = fill_vmentry(l, p, &vme[count], map, entry);
5267 			if (error)
5268 				goto out;
5269 			dp += elem_size;
5270 		}
5271 		count++;
5272 	}
5273 	vm_map_unlock_read(map);
5274 	uvmspace_free(vm);
5275 
5276 out:
5277 	if (pid != -1)
5278 		mutex_exit(p->p_lock);
5279 	if (error == 0) {
5280 		const u_int esize = uimin(sizeof(*vme), elem_size);
5281 		dp = oldp;
5282 		for (size_t i = 0; i < count; i++) {
5283 			if (oldp && (dp - (char *)oldp) < vmesize) {
5284 				error = sysctl_copyout(l, &vme[i], dp, esize);
5285 				if (error)
5286 					break;
5287 				dp += elem_size;
5288 			} else
5289 				break;
5290 		}
5291 		count *= elem_size;
5292 		if (oldp != NULL && *oldlenp < count)
5293 			error = ENOSPC;
5294 		*oldlenp = count;
5295 	}
5296 	if (vme)
5297 		kmem_free(vme, vmesize);
5298 	return error;
5299 }
5300 
5301 static int
5302 sysctl_vmproc(SYSCTLFN_ARGS)
5303 {
5304 	int error;
5305 
5306 	if (namelen == 1 && name[0] == CTL_QUERY)
5307 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
5308 
5309 	if (namelen == 0)
5310 		return EINVAL;
5311 
5312 	switch (name[0]) {
5313 	case VM_PROC_MAP:
5314 		if (namelen != 3)
5315 			return EINVAL;
5316 		sysctl_unlock();
5317 		error = fill_vmentries(l, name[1], name[2], oldp, oldlenp);
5318 		sysctl_relock();
5319 		return error;
5320 	default:
5321 		return EINVAL;
5322 	}
5323 }
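
/*
 * from userland, the VM_PROC_MAP node is reached with a five-element
 * MIB (a sketch of the expected calling convention; consumers should
 * size-probe first, as usual for variable-length sysctl data):
 *
 *	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, pid,
 *	    sizeof(struct kinfo_vmentry) };
 *	size_t len = 0;
 *
 *	(void)sysctl(mib, 5, NULL, &len, NULL, 0);	(size probe)
 *	buf = malloc(len);
 *	(void)sysctl(mib, 5, buf, &len, NULL, 0);	(fetch entries)
 */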
5324 
5325 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
5326 {
5327 
5328 	sysctl_createv(clog, 0, NULL, NULL,
5329 		       CTLFLAG_PERMANENT,
5330 		       CTLTYPE_STRUCT, "proc",
5331 		       SYSCTL_DESCR("Process vm information"),
5332 		       sysctl_vmproc, 0, NULL, 0,
5333 		       CTL_VM, VM_PROC, CTL_EOL);
5334 #ifndef __USER_VA0_IS_SAFE
5335         sysctl_createv(clog, 0, NULL, NULL,
5336                        CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
5337                        CTLTYPE_INT, "user_va0_disable",
5338                        SYSCTL_DESCR("Disable VA 0"),
5339                        sysctl_user_va0_disable, 0, &user_va0_disable, 0,
5340                        CTL_VM, CTL_CREATE, CTL_EOL);
5341 #endif
5342 }
5343