1 /*	$NetBSD: uvm_map.c,v 1.336 2015/11/05 00:10:48 pgoyette Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993, The Regents of the University of California.
6  *
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
37  * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
38  *
39  *
40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 
64 /*
65  * uvm_map.c: uvm map operations
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.336 2015/11/05 00:10:48 pgoyette Exp $");
70 
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_uvm.h"
74 #include "opt_sysv.h"
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/mman.h>
79 #include <sys/proc.h>
80 #include <sys/pool.h>
81 #include <sys/kernel.h>
82 #include <sys/mount.h>
83 #include <sys/vnode.h>
84 #include <sys/filedesc.h>
85 #include <sys/lockdebug.h>
86 #include <sys/atomic.h>
87 #include <sys/sysctl.h>
88 #ifndef __USER_VA0_IS_SAFE
89 #include <sys/kauth.h>
90 #include "opt_user_va0_disable_default.h"
91 #endif
92 
93 #include <sys/shm.h>
94 
95 #include <uvm/uvm.h>
96 #include <uvm/uvm_readahead.h>
97 
98 #if defined(DDB) || defined(DEBUGPRINT)
99 #include <uvm/uvm_ddb.h>
100 #endif
101 
102 #ifdef UVMHIST
103 static struct kern_history_ent maphistbuf[100];
104 UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
105 #endif
106 
107 #if !defined(UVMMAP_COUNTERS)
108 
109 #define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
110 #define UVMMAP_EVCNT_INCR(ev)		/* nothing */
111 #define UVMMAP_EVCNT_DECR(ev)		/* nothing */
112 
113 #else /* defined(UVMMAP_COUNTERS) */
114 
115 #include <sys/evcnt.h>
116 #define	UVMMAP_EVCNT_DEFINE(name) \
117 struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
118     "uvmmap", #name); \
119 EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
120 #define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
121 #define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--
122 
123 #endif /* defined(UVMMAP_COUNTERS) */
124 
125 UVMMAP_EVCNT_DEFINE(ubackmerge)
126 UVMMAP_EVCNT_DEFINE(uforwmerge)
127 UVMMAP_EVCNT_DEFINE(ubimerge)
128 UVMMAP_EVCNT_DEFINE(unomerge)
129 UVMMAP_EVCNT_DEFINE(kbackmerge)
130 UVMMAP_EVCNT_DEFINE(kforwmerge)
131 UVMMAP_EVCNT_DEFINE(kbimerge)
132 UVMMAP_EVCNT_DEFINE(knomerge)
133 UVMMAP_EVCNT_DEFINE(map_call)
134 UVMMAP_EVCNT_DEFINE(mlk_call)
135 UVMMAP_EVCNT_DEFINE(mlk_hint)
136 UVMMAP_EVCNT_DEFINE(mlk_list)
137 UVMMAP_EVCNT_DEFINE(mlk_tree)
138 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
139 UVMMAP_EVCNT_DEFINE(mlk_listloop)
140 
141 const char vmmapbsy[] = "vmmapbsy";
142 
143 /*
144  * cache for vmspace structures.
145  */
146 
147 static struct pool_cache uvm_vmspace_cache;
148 
149 /*
150  * cache for dynamically-allocated map entries.
151  */
152 
153 static struct pool_cache uvm_map_entry_cache;
154 
155 #ifdef PMAP_GROWKERNEL
156 /*
157  * This global represents the end of the kernel virtual address
158  * space.  If we want to exceed this, we must grow the kernel
159  * virtual address space dynamically.
160  *
161  * Note, this variable is locked by kernel_map's lock.
162  */
163 vaddr_t uvm_maxkaddr;
164 #endif
165 
166 #ifndef __USER_VA0_IS_SAFE
167 #ifndef __USER_VA0_DISABLE_DEFAULT
168 #define __USER_VA0_DISABLE_DEFAULT 1
169 #endif
170 #ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
171 #undef __USER_VA0_DISABLE_DEFAULT
172 #define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
173 #endif
174 static int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
175 #endif
176 
177 /*
178  * macros
179  */
180 
181 /*
182  * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
183  */
184 extern struct vm_map *pager_map;
185 
186 #define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
187     prot, maxprot, inh, adv, wire) \
188 	((ent)->etype == (type) && \
189 	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
190 	(ent)->object.uvm_obj == (uobj) && \
191 	(ent)->protection == (prot) && \
192 	(ent)->max_protection == (maxprot) && \
193 	(ent)->inheritance == (inh) && \
194 	(ent)->advice == (adv) && \
195 	(ent)->wired_count == (wire))
196 
197 /*
198  * uvm_map_entry_link: insert entry into a map
199  *
200  * => map must be locked
201  */
202 #define uvm_map_entry_link(map, after_where, entry) do { \
203 	uvm_mapent_check(entry); \
204 	(map)->nentries++; \
205 	(entry)->prev = (after_where); \
206 	(entry)->next = (after_where)->next; \
207 	(entry)->prev->next = (entry); \
208 	(entry)->next->prev = (entry); \
209 	uvm_rb_insert((map), (entry)); \
210 } while (/*CONSTCOND*/ 0)
211 
212 /*
213  * uvm_map_entry_unlink: remove entry from a map
214  *
215  * => map must be locked
216  */
217 #define uvm_map_entry_unlink(map, entry) do { \
218 	KASSERT((entry) != (map)->first_free); \
219 	KASSERT((entry) != (map)->hint); \
220 	uvm_mapent_check(entry); \
221 	(map)->nentries--; \
222 	(entry)->next->prev = (entry)->prev; \
223 	(entry)->prev->next = (entry)->next; \
224 	uvm_rb_remove((map), (entry)); \
225 } while (/*CONSTCOND*/ 0)
226 
227 /*
228  * SAVE_HINT: saves the specified entry as the hint for future lookups.
229  *
230  * => map need not be locked.
231  */
232 #define SAVE_HINT(map, check, value) do { \
233 	if ((map)->hint == (check)) \
234 		(map)->hint = (value); \
235 } while (/*CONSTCOND*/ 0)
236 
237 /*
238  * clear_hints: ensure that hints don't point to the entry.
239  *
240  * => map must be write-locked.
241  */
242 static void
243 clear_hints(struct vm_map *map, struct vm_map_entry *ent)
244 {
245 
246 	SAVE_HINT(map, ent, ent->prev);
247 	if (map->first_free == ent) {
248 		map->first_free = ent->prev;
249 	}
250 }
251 
252 /*
253  * VM_MAP_RANGE_CHECK: check and correct range
254  *
255  * => map must at least be read locked
256  */
257 
258 #define VM_MAP_RANGE_CHECK(map, start, end) do { \
259 	if (start < vm_map_min(map))		\
260 		start = vm_map_min(map);	\
261 	if (end > vm_map_max(map))		\
262 		end = vm_map_max(map);		\
263 	if (start > end)			\
264 		start = end;			\
265 } while (/*CONSTCOND*/ 0)
266 
267 /*
268  * local prototypes
269  */
270 
271 static struct vm_map_entry *
272 		uvm_mapent_alloc(struct vm_map *, int);
273 static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
274 static void	uvm_mapent_free(struct vm_map_entry *);
275 #if defined(DEBUG)
276 static void	_uvm_mapent_check(const struct vm_map_entry *, const char *,
277 		    int);
278 #define	uvm_mapent_check(map)	_uvm_mapent_check(map, __FILE__, __LINE__)
279 #else /* defined(DEBUG) */
280 #define	uvm_mapent_check(e)	/* nothing */
281 #endif /* defined(DEBUG) */
282 
283 static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
284 static void	uvm_map_reference_amap(struct vm_map_entry *, int);
285 static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
286 		    int, struct vm_map_entry *);
287 static void	uvm_map_unreference_amap(struct vm_map_entry *, int);
288 
289 int _uvm_map_sanity(struct vm_map *);
290 int _uvm_tree_sanity(struct vm_map *);
291 static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);
292 
293 #define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
294 #define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
295 #define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
296 #define	PARENT_ENTRY(map, entry) \
297 	(ROOT_ENTRY(map) == (entry) \
298 	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))
299 
300 /*
301  * These get filled in if/when SYSVSHM shared memory code is loaded
302  *
303  * We do this with function pointers rather than #ifdef SYSVSHM so the
304  * SYSVSHM code can be loaded and unloaded.
305  */
306 void (*uvm_shmexit)(struct vmspace *) = NULL;
307 void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;
308 
309 static int
310 uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
311 {
312 	const struct vm_map_entry *eparent = nparent;
313 	const struct vm_map_entry *ekey = nkey;
314 
315 	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
316 	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);
317 
318 	if (eparent->start < ekey->start)
319 		return -1;
320 	if (eparent->end >= ekey->start)
321 		return 1;
322 	return 0;
323 }
324 
325 static int
326 uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
327 {
328 	const struct vm_map_entry *eparent = nparent;
329 	const vaddr_t va = *(const vaddr_t *) vkey;
330 
331 	if (eparent->start < va)
332 		return -1;
333 	if (eparent->end >= va)
334 		return 1;
335 	return 0;
336 }
337 
338 static const rb_tree_ops_t uvm_map_tree_ops = {
339 	.rbto_compare_nodes = uvm_map_compare_nodes,
340 	.rbto_compare_key = uvm_map_compare_key,
341 	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
342 	.rbto_context = NULL
343 };
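
/*
 * Editor's note: a minimal sketch of how these ops are wired up (the
 * real call site is uvm_map_setup(); everything named here comes from
 * this file or <sys/rbtree.h>):
 *
 *	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
 *
 * after which rb_tree_insert_node() and rb_tree_find_node() use the
 * two comparators above to keep entries sorted by start address.
 */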
344 
345 /*
346  * uvm_rb_gap: return the gap size between our entry and next entry.
347  */
348 static inline vsize_t
349 uvm_rb_gap(const struct vm_map_entry *entry)
350 {
351 
352 	KASSERT(entry->next != NULL);
353 	return entry->next->start - entry->end;
354 }
355 
356 static vsize_t
357 uvm_rb_maxgap(const struct vm_map_entry *entry)
358 {
359 	struct vm_map_entry *child;
360 	vsize_t maxgap = entry->gap;
361 
362 	/*
363 	 * We need maxgap to be the largest gap of us or any of our
364 	 * descendants.  Since each of our children's maxgap is the
365 	 * cached value of their largest gap of themselves or their
366 	 * descendants, we can just use that value and avoid recursing
367 	 * down the tree to calculate it.
368 	 */
369 	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
370 		maxgap = child->maxgap;
371 
372 	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
373 		maxgap = child->maxgap;
374 
375 	return maxgap;
376 }
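
/*
 * Editor's sketch of the invariant maintained here: for every entry e,
 *
 *	e->maxgap == MAX(e->gap,
 *	    LEFT_ENTRY(e)  ? LEFT_ENTRY(e)->maxgap  : 0,
 *	    RIGHT_ENTRY(e) ? RIGHT_ENTRY(e)->maxgap : 0)
 *
 * so uvm_map_findspace() can prune any subtree whose root has
 * maxgap < length without visiting its children.
 */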
377 
378 static void
379 uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
380 {
381 	struct vm_map_entry *parent;
382 
383 	KASSERT(entry->gap == uvm_rb_gap(entry));
384 	entry->maxgap = uvm_rb_maxgap(entry);
385 
386 	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
387 		struct vm_map_entry *brother;
388 		vsize_t maxgap = parent->gap;
389 		unsigned int which;
390 
391 		KDASSERT(parent->gap == uvm_rb_gap(parent));
392 		if (maxgap < entry->maxgap)
393 			maxgap = entry->maxgap;
394 		/*
395 		 * Since we work towards the root, we know entry's maxgap
396 		 * value is OK, but its brother may now be out-of-date due
397 		 * to rebalancing.  So refresh it.
398 		 */
399 		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
400 		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
401 		if (brother != NULL) {
402 			KDASSERT(brother->gap == uvm_rb_gap(brother));
403 			brother->maxgap = uvm_rb_maxgap(brother);
404 			if (maxgap < brother->maxgap)
405 				maxgap = brother->maxgap;
406 		}
407 
408 		parent->maxgap = maxgap;
409 		entry = parent;
410 	}
411 }
412 
413 static void
414 uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
415 {
416 	struct vm_map_entry *ret __diagused;
417 
418 	entry->gap = entry->maxgap = uvm_rb_gap(entry);
419 	if (entry->prev != &map->header)
420 		entry->prev->gap = uvm_rb_gap(entry->prev);
421 
422 	ret = rb_tree_insert_node(&map->rb_tree, entry);
423 	KASSERTMSG(ret == entry,
424 	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);
425 
426 	/*
427 	 * If the previous entry is not our immediate left child, then it's an
428 	 * ancestor and will be fixed up on the way to the root.  We don't
429 	 * have to check entry->prev against &map->header since &map->header
430 	 * will never be in the tree.
431 	 */
432 	uvm_rb_fixup(map,
433 	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
434 }
435 
436 static void
437 uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
438 {
439 	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;
440 
441 	/*
442 	 * If we are removing an interior node, then an adjacent node will
443 	 * be used to replace its position in the tree.  Therefore we will
444 	 * need to fixup the tree starting at the parent of the replacement
445 	 * node.  So record their parents for later use.
446 	 */
447 	if (entry->prev != &map->header)
448 		prev_parent = PARENT_ENTRY(map, entry->prev);
449 	if (entry->next != &map->header)
450 		next_parent = PARENT_ENTRY(map, entry->next);
451 
452 	rb_tree_remove_node(&map->rb_tree, entry);
453 
454 	/*
455 	 * If the previous node has a new parent, fixup the tree starting
456 	 * at the previous node's old parent.
457 	 */
458 	if (entry->prev != &map->header) {
459 		/*
460 		 * Update the previous entry's gap due to our absence.
461 		 */
462 		entry->prev->gap = uvm_rb_gap(entry->prev);
463 		uvm_rb_fixup(map, entry->prev);
464 		if (prev_parent != NULL
465 		    && prev_parent != entry
466 		    && prev_parent != PARENT_ENTRY(map, entry->prev))
467 			uvm_rb_fixup(map, prev_parent);
468 	}
469 
470 	/*
471 	 * If the next node has a new parent, fixup the tree starting
472 	 * at the next node's old parent.
473 	 */
474 	if (entry->next != &map->header) {
475 		uvm_rb_fixup(map, entry->next);
476 		if (next_parent != NULL
477 		    && next_parent != entry
478 		    && next_parent != PARENT_ENTRY(map, entry->next))
479 			uvm_rb_fixup(map, next_parent);
480 	}
481 }
482 
483 #if defined(DEBUG)
484 int uvm_debug_check_map = 0;
485 int uvm_debug_check_rbtree = 0;
486 #define uvm_map_check(map, name) \
487 	_uvm_map_check((map), (name), __FILE__, __LINE__)
488 static void
489 _uvm_map_check(struct vm_map *map, const char *name,
490     const char *file, int line)
491 {
492 
493 	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
494 	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
495 		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
496 		    name, map, file, line);
497 	}
498 }
499 #else /* defined(DEBUG) */
500 #define uvm_map_check(map, name)	/* nothing */
501 #endif /* defined(DEBUG) */
502 
503 #if defined(DEBUG) || defined(DDB)
504 int
505 _uvm_map_sanity(struct vm_map *map)
506 {
507 	bool first_free_found = false;
508 	bool hint_found = false;
509 	const struct vm_map_entry *e;
510 	struct vm_map_entry *hint = map->hint;
511 
512 	e = &map->header;
513 	for (;;) {
514 		if (map->first_free == e) {
515 			first_free_found = true;
516 		} else if (!first_free_found && e->next->start > e->end) {
517 			printf("first_free %p should be %p\n",
518 			    map->first_free, e);
519 			return -1;
520 		}
521 		if (hint == e) {
522 			hint_found = true;
523 		}
524 
525 		e = e->next;
526 		if (e == &map->header) {
527 			break;
528 		}
529 	}
530 	if (!first_free_found) {
531 		printf("stale first_free\n");
532 		return -1;
533 	}
534 	if (!hint_found) {
535 		printf("stale hint\n");
536 		return -1;
537 	}
538 	return 0;
539 }
540 
541 int
542 _uvm_tree_sanity(struct vm_map *map)
543 {
544 	struct vm_map_entry *tmp, *trtmp;
545 	int n = 0, i = 1;
546 
547 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
548 		if (tmp->gap != uvm_rb_gap(tmp)) {
549 			printf("%d/%d gap %#lx != %#lx %s\n",
550 			    n + 1, map->nentries,
551 			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
552 			    tmp->next == &map->header ? "(last)" : "");
553 			goto error;
554 		}
555 		/*
556 		 * tmp->gap is unsigned, so out-of-order entries make the
557 		 * subtraction wrap around, likely exceeding the map's size.
558 		 */
559 		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
560 			printf("too large gap %zu\n", (size_t)tmp->gap);
561 			goto error;
562 		}
563 		n++;
564 	}
565 
566 	if (n != map->nentries) {
567 		printf("nentries: %d vs %d\n", n, map->nentries);
568 		goto error;
569 	}
570 
571 	trtmp = NULL;
572 	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
573 		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
574 			printf("maxgap %#lx != %#lx\n",
575 			    (ulong)tmp->maxgap,
576 			    (ulong)uvm_rb_maxgap(tmp));
577 			goto error;
578 		}
579 		if (trtmp != NULL && trtmp->start >= tmp->start) {
580 			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
581 			    trtmp->start, tmp->start);
582 			goto error;
583 		}
584 
585 		trtmp = tmp;
586 	}
587 
588 	for (tmp = map->header.next; tmp != &map->header;
589 	    tmp = tmp->next, i++) {
590 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
591 		if (trtmp == NULL)
592 			trtmp = &map->header;
593 		if (tmp->prev != trtmp) {
594 			printf("lookup: %d: %p->prev=%p: %p\n",
595 			    i, tmp, tmp->prev, trtmp);
596 			goto error;
597 		}
598 		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
599 		if (trtmp == NULL)
600 			trtmp = &map->header;
601 		if (tmp->next != trtmp) {
602 			printf("lookup: %d: %p->next=%p: %p\n",
603 			    i, tmp, tmp->next, trtmp);
604 			goto error;
605 		}
606 		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
607 		if (trtmp != tmp) {
608 			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
609 			    PARENT_ENTRY(map, tmp));
610 			goto error;
611 		}
612 	}
613 
614 	return (0);
615  error:
616 	return (-1);
617 }
618 #endif /* defined(DEBUG) || defined(DDB) */
619 
620 /*
621  * vm_map_lock: acquire an exclusive (write) lock on a map.
622  *
623  * => The locking protocol provides for guaranteed upgrade from shared ->
624  *    exclusive by whichever thread currently has the map marked busy.
625  *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
626  *    other problems, it defeats any fairness guarantees provided by RW
627  *    locks.
628  */
629 
630 void
631 vm_map_lock(struct vm_map *map)
632 {
633 
634 	for (;;) {
635 		rw_enter(&map->lock, RW_WRITER);
636 		if (map->busy == NULL || map->busy == curlwp) {
637 			break;
638 		}
639 		mutex_enter(&map->misc_lock);
640 		rw_exit(&map->lock);
641 		if (map->busy != NULL) {
642 			cv_wait(&map->cv, &map->misc_lock);
643 		}
644 		mutex_exit(&map->misc_lock);
645 	}
646 	map->timestamp++;
647 }
648 
649 /*
650  * vm_map_lock_try: try to lock a map, failing if it is already locked.
651  */
652 
653 bool
654 vm_map_lock_try(struct vm_map *map)
655 {
656 
657 	if (!rw_tryenter(&map->lock, RW_WRITER)) {
658 		return false;
659 	}
660 	if (map->busy != NULL) {
661 		rw_exit(&map->lock);
662 		return false;
663 	}
664 	map->timestamp++;
665 	return true;
666 }
667 
668 /*
669  * vm_map_unlock: release an exclusive lock on a map.
670  */
671 
672 void
673 vm_map_unlock(struct vm_map *map)
674 {
675 
676 	KASSERT(rw_write_held(&map->lock));
677 	KASSERT(map->busy == NULL || map->busy == curlwp);
678 	rw_exit(&map->lock);
679 }
680 
681 /*
682  * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
683  *     want an exclusive lock.
684  */
685 
686 void
687 vm_map_unbusy(struct vm_map *map)
688 {
689 
690 	KASSERT(map->busy == curlwp);
691 
692 	/*
693 	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
694 	 *
695 	 * o they can only be set with a write lock held
696 	 * o writers are blocked out with a read or write hold
697 	 * o at any time, only one thread owns the set of values
698 	 */
699 	mutex_enter(&map->misc_lock);
700 	map->busy = NULL;
701 	cv_broadcast(&map->cv);
702 	mutex_exit(&map->misc_lock);
703 }
704 
705 /*
706  * vm_map_lock_read: acquire a shared (read) lock on a map.
707  */
708 
709 void
710 vm_map_lock_read(struct vm_map *map)
711 {
712 
713 	rw_enter(&map->lock, RW_READER);
714 }
715 
716 /*
717  * vm_map_unlock_read: release a shared lock on a map.
718  */
719 
720 void
721 vm_map_unlock_read(struct vm_map *map)
722 {
723 
724 	rw_exit(&map->lock);
725 }
726 
727 /*
728  * vm_map_busy: mark a map as busy.
729  *
730  * => the caller must hold the map write locked
731  */
732 
733 void
734 vm_map_busy(struct vm_map *map)
735 {
736 
737 	KASSERT(rw_write_held(&map->lock));
738 	KASSERT(map->busy == NULL);
739 
740 	map->busy = curlwp;
741 }
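
/*
 * Editor's sketch of the busy/upgrade protocol implemented by the
 * functions above (the pattern used by, e.g., uvm_map_pageable()):
 *
 *	vm_map_lock(map);	// exclusive
 *	vm_map_busy(map);	// mark busy, then drop the lock
 *	vm_map_unlock(map);
 *	// ... long-running work, e.g. faulting in pages ...
 *	vm_map_lock(map);	// re-acquire: we hold the busy mark
 *	vm_map_unbusy(map);	// wake anyone sleeping on map->cv
 *	vm_map_unlock(map);
 */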
742 
743 /*
744  * vm_map_locked_p: return true if the map is write locked.
745  *
746  * => only for debug purposes like KASSERTs.
747  * => should not be used to verify that a map is not locked.
748  */
749 
750 bool
751 vm_map_locked_p(struct vm_map *map)
752 {
753 
754 	return rw_write_held(&map->lock);
755 }
756 
757 /*
758  * uvm_mapent_alloc: allocate a map entry
759  */
760 
761 static struct vm_map_entry *
762 uvm_mapent_alloc(struct vm_map *map, int flags)
763 {
764 	struct vm_map_entry *me;
765 	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
766 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
767 
768 	me = pool_cache_get(&uvm_map_entry_cache, pflags);
769 	if (__predict_false(me == NULL)) {
770 		return NULL;
771 	}
772 	me->flags = 0;
773 
774 	UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%d]", me,
775 	    (map == kernel_map), 0, 0);
776 	return me;
777 }
778 
779 /*
780  * uvm_mapent_free: free map entry
781  */
782 
783 static void
784 uvm_mapent_free(struct vm_map_entry *me)
785 {
786 	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
787 
788 	UVMHIST_LOG(maphist,"<- freeing map entry=%p [flags=%d]",
789 		me, me->flags, 0, 0);
790 	pool_cache_put(&uvm_map_entry_cache, me);
791 }
792 
793 /*
794  * uvm_mapent_copy: copy a map entry, preserving flags
795  */
796 
797 static inline void
798 uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
799 {
800 
801 	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
802 	    ((char *)src));
803 }
804 
805 #if defined(DEBUG)
806 static void
807 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
808 {
809 
810 	if (entry->start >= entry->end) {
811 		goto bad;
812 	}
813 	if (UVM_ET_ISOBJ(entry)) {
814 		if (entry->object.uvm_obj == NULL) {
815 			goto bad;
816 		}
817 	} else if (UVM_ET_ISSUBMAP(entry)) {
818 		if (entry->object.sub_map == NULL) {
819 			goto bad;
820 		}
821 	} else {
822 		if (entry->object.uvm_obj != NULL ||
823 		    entry->object.sub_map != NULL) {
824 			goto bad;
825 		}
826 	}
827 	if (!UVM_ET_ISOBJ(entry)) {
828 		if (entry->offset != 0) {
829 			goto bad;
830 		}
831 	}
832 
833 	return;
834 
835 bad:
836 	panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
837 }
838 #endif /* defined(DEBUG) */
839 
840 /*
841  * uvm_map_entry_unwire: unwire a map entry
842  *
843  * => map should be locked by caller
844  */
845 
846 static inline void
847 uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
848 {
849 
850 	entry->wired_count = 0;
851 	uvm_fault_unwire_locked(map, entry->start, entry->end);
852 }
853 
854 
855 /*
856  * wrapper for calling amap_ref()
857  */
858 static inline void
859 uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
860 {
861 
862 	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
863 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
864 }
865 
866 
867 /*
868  * wrapper for calling amap_unref()
869  */
870 static inline void
871 uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
872 {
873 
874 	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
875 	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
876 }
877 
878 
879 /*
880  * uvm_map_init: init mapping system at boot time.
881  */
882 
883 void
884 uvm_map_init(void)
885 {
886 #if defined(UVMHIST)
887 	static struct kern_history_ent pdhistbuf[100];
888 #endif
889 
890 	/*
891 	 * first, init logging system.
892 	 */
893 
894 	UVMHIST_FUNC("uvm_map_init");
895 	UVMHIST_LINK_STATIC(maphist);
896 	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
897 	UVMHIST_CALLED(maphist);
898 	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
899 
900 	/*
901 	 * initialize the global lock for kernel map entry.
902 	 */
903 
904 	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
905 }
906 
907 /*
908  * uvm_map_init_caches: init mapping system caches.
909  */
910 void
911 uvm_map_init_caches(void)
912 {
913 	/*
914 	 * initialize caches.
915 	 */
916 
917 	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
918 	    0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
919 	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
920 	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
921 }
922 
923 /*
924  * clippers
925  */
926 
927 /*
928  * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
929  */
930 
931 static void
932 uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
933     vaddr_t splitat)
934 {
935 	vaddr_t adj;
936 
937 	KASSERT(entry1->start < splitat);
938 	KASSERT(splitat < entry1->end);
939 
940 	adj = splitat - entry1->start;
941 	entry1->end = entry2->start = splitat;
942 
943 	if (entry1->aref.ar_amap) {
944 		amap_splitref(&entry1->aref, &entry2->aref, adj);
945 	}
946 	if (UVM_ET_ISSUBMAP(entry1)) {
947 		/* ... unlikely to happen, but play it safe */
948 		 uvm_map_reference(entry1->object.sub_map);
949 	} else if (UVM_ET_ISOBJ(entry1)) {
950 		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
951 		entry2->offset += adj;
952 		if (entry1->object.uvm_obj->pgops &&
953 		    entry1->object.uvm_obj->pgops->pgo_reference)
954 			entry1->object.uvm_obj->pgops->pgo_reference(
955 			    entry1->object.uvm_obj);
956 	}
957 }
958 
959 /*
960  * uvm_map_clip_start: ensure that the entry begins at or after
961  *	the starting address, if it doesn't we split the entry.
962  *
963  * => caller should use UVM_MAP_CLIP_START macro rather than calling
964  *    this directly
965  * => map must be locked by caller
966  */
967 
968 void
969 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
970     vaddr_t start)
971 {
972 	struct vm_map_entry *new_entry;
973 
974 	/* uvm_map_simplify_entry(map, entry); */ /* XXX */
975 
976 	uvm_map_check(map, "clip_start entry");
977 	uvm_mapent_check(entry);
978 
979 	/*
980 	 * Split off the front portion.  note that we must insert the new
981 	 * entry BEFORE this one, so that this entry has the specified
982 	 * starting address.
983 	 */
984 	new_entry = uvm_mapent_alloc(map, 0);
985 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
986 	uvm_mapent_splitadj(new_entry, entry, start);
987 	uvm_map_entry_link(map, entry->prev, new_entry);
988 
989 	uvm_map_check(map, "clip_start leave");
990 }
991 
992 /*
993  * uvm_map_clip_end: ensure that the entry ends at or before
994  *	the ending address, if it doesn't we split the entry.
995  *
996  * => caller should use UVM_MAP_CLIP_END macro rather than calling
997  *    this directly
998  * => map must be locked by caller
999  */
1000 
1001 void
1002 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
1003 {
1004 	struct vm_map_entry *new_entry;
1005 
1006 	uvm_map_check(map, "clip_end entry");
1007 	uvm_mapent_check(entry);
1008 
1009 	/*
1010 	 *	Create a new entry and insert it
1011 	 *	AFTER the specified entry
1012 	 */
1013 	new_entry = uvm_mapent_alloc(map, 0);
1014 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1015 	uvm_mapent_splitadj(entry, new_entry, end);
1016 	uvm_map_entry_link(map, entry, new_entry);
1017 
1018 	uvm_map_check(map, "clip_end leave");
1019 }
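
/*
 * Editor's example of the clippers (addresses are made up): given a
 * single entry covering [0x1000, 0x4000),
 *
 *	UVM_MAP_CLIP_START(map, entry, 0x2000);
 *	UVM_MAP_CLIP_END(map, entry, 0x3000);
 *
 * leaves three entries, [0x1000, 0x2000), [0x2000, 0x3000) and
 * [0x3000, 0x4000), with "entry" describing the middle one and
 * uvm_mapent_splitadj() having adjusted the amap/object offsets and
 * references of each piece.
 */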
1020 
1021 /*
1022  *   M A P   -   m a i n   e n t r y   p o i n t
1023  */
1024 /*
1025  * uvm_map: establish a valid mapping in a map
1026  *
1027  * => assume startp is page aligned.
1028  * => assume size is a multiple of PAGE_SIZE.
1029  * => assume sys_mmap provides enough of a "hint" to have us skip
1030  *	over text/data/bss area.
1031  * => map must be unlocked (we will lock it)
1032  * => <uobj,uoffset> value meanings (4 cases):
1033  *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
1034  *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
1035  *	 [3] <uobj,uoffset>		== normal mapping
1036  *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
1037  *
1038  *    case [4] is for kernel mappings where we don't know the offset until
1039  *    we've found a virtual address.   note that kernel object offsets are
1040  *    always relative to vm_map_min(kernel_map).
1041  *
1042  * => if `align' is non-zero, we align the virtual address to the specified
1043  *	alignment.
1044  *	this is provided as a mechanism for large pages.
1045  *
1046  * => XXXCDC: need way to map in external amap?
1047  */
1048 
1049 int
1050 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1051     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1052 {
1053 	struct uvm_map_args args;
1054 	struct vm_map_entry *new_entry;
1055 	int error;
1056 
1057 	KASSERT((size & PAGE_MASK) == 0);
1058 
1059 #ifndef __USER_VA0_IS_SAFE
1060 	if ((flags & UVM_FLAG_FIXED) && *startp == 0 &&
1061 	    !VM_MAP_IS_KERNEL(map) && user_va0_disable)
1062 		return EACCES;
1063 #endif
1064 
1065 	/*
1066 	 * for pager_map, allocate the new entry first to avoid sleeping
1067 	 * for memory while we have the map locked.
1068 	 */
1069 
1070 	new_entry = NULL;
1071 	if (map == pager_map) {
1072 		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1073 		if (__predict_false(new_entry == NULL))
1074 			return ENOMEM;
1075 	}
1076 	if (map == pager_map)
1077 		flags |= UVM_FLAG_NOMERGE;
1078 
1079 	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1080 	    flags, &args);
1081 	if (!error) {
1082 		error = uvm_map_enter(map, &args, new_entry);
1083 		*startp = args.uma_start;
1084 	} else if (new_entry) {
1085 		uvm_mapent_free(new_entry);
1086 	}
1087 
1088 #if defined(DEBUG)
1089 	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
1090 		uvm_km_check_empty(map, *startp, *startp + size);
1091 	}
1092 #endif /* defined(DEBUG) */
1093 
1094 	return error;
1095 }
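
/*
 * Editor's sketch of a typical anonymous-memory call (the flag values
 * are illustrative only; see UVM_MAPFLAG() in uvm_extern.h):
 *
 *	vaddr_t va = 0;		// not fixed; uvm_map chooses the address
 *	int error = uvm_map(map, &va, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, 0));
 *	// on success, va holds the start of the new mapping
 */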
1096 
1097 /*
1098  * uvm_map_prepare:
1099  *
1100  * called with map unlocked.
1101  * on success, returns the map locked.
1102  */
1103 
1104 int
1105 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1106     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1107     struct uvm_map_args *args)
1108 {
1109 	struct vm_map_entry *prev_entry;
1110 	vm_prot_t prot = UVM_PROTECTION(flags);
1111 	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1112 
1113 	UVMHIST_FUNC("uvm_map_prepare");
1114 	UVMHIST_CALLED(maphist);
1115 
1116 	UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)",
1117 	    map, start, size, flags);
1118 	UVMHIST_LOG(maphist, "  uobj/offset %p/%ld", uobj, uoffset,0,0);
1119 
1120 	/*
1121 	 * detect a popular device driver bug.
1122 	 */
1123 
1124 	KASSERT(doing_shutdown || curlwp != NULL);
1125 
1126 	/*
1127 	 * zero-sized mapping doesn't make any sense.
1128 	 */
1129 	KASSERT(size > 0);
1130 
1131 	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1132 
1133 	uvm_map_check(map, "map entry");
1134 
1135 	/*
1136 	 * check sanity of protection code
1137 	 */
1138 
1139 	if ((prot & maxprot) != prot) {
1140 		UVMHIST_LOG(maphist, "<- prot. failure:  prot=%#x, max=%#x",
1141 		prot, maxprot,0,0);
1142 		return EACCES;
1143 	}
1144 
1145 	/*
1146 	 * figure out where to put new VM range
1147 	 */
1148 retry:
1149 	if (vm_map_lock_try(map) == false) {
1150 		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
1151 			return EAGAIN;
1152 		}
1153 		vm_map_lock(map); /* could sleep here */
1154 	}
1155 	prev_entry = uvm_map_findspace(map, start, size, &start,
1156 	    uobj, uoffset, align, flags);
1157 	if (prev_entry == NULL) {
1158 		unsigned int timestamp;
1159 
1160 		timestamp = map->timestamp;
1161 		UVMHIST_LOG(maphist,"waiting va timestamp=%#x",
1162 			    timestamp,0,0,0);
1163 		map->flags |= VM_MAP_WANTVA;
1164 		vm_map_unlock(map);
1165 
1166 		/*
1167 		 * try to reclaim kva and wait until someone unmaps something.
1168 		 * the locking here is fragile, so we wake every second to
1169 		 * recheck the condition.
1170 		 */
1171 
1172 		mutex_enter(&map->misc_lock);
1173 		while ((map->flags & VM_MAP_WANTVA) != 0 &&
1174 		   map->timestamp == timestamp) {
1175 			if ((flags & UVM_FLAG_WAITVA) == 0) {
1176 				mutex_exit(&map->misc_lock);
1177 				UVMHIST_LOG(maphist,
1178 				    "<- uvm_map_findspace failed!", 0,0,0,0);
1179 				return ENOMEM;
1180 			} else {
1181 				cv_timedwait(&map->cv, &map->misc_lock, hz);
1182 			}
1183 		}
1184 		mutex_exit(&map->misc_lock);
1185 		goto retry;
1186 	}
1187 
1188 #ifdef PMAP_GROWKERNEL
1189 	/*
1190 	 * If the kernel pmap can't map the requested space,
1191 	 * then allocate more resources for it.
1192 	 */
1193 	if (map == kernel_map && uvm_maxkaddr < (start + size))
1194 		uvm_maxkaddr = pmap_growkernel(start + size);
1195 #endif
1196 
1197 	UVMMAP_EVCNT_INCR(map_call);
1198 
1199 	/*
1200 	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1201 	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
1202 	 * either case we want to zero it  before storing it in the map entry
1203 	 * (because it looks strange and confusing when debugging...)
1204 	 *
1205 	 * if uobj is not null
1206 	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1207 	 *      and we do not need to change uoffset.
1208 	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1209 	 *      now (based on the starting address of the map).   this case is
1210 	 *      for kernel object mappings where we don't know the offset until
1211 	 *      the virtual address is found (with uvm_map_findspace).   the
1212 	 *      offset is the distance we are from the start of the map.
1213 	 */
1214 
1215 	if (uobj == NULL) {
1216 		uoffset = 0;
1217 	} else {
1218 		if (uoffset == UVM_UNKNOWN_OFFSET) {
1219 			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1220 			uoffset = start - vm_map_min(kernel_map);
1221 		}
1222 	}
1223 
1224 	args->uma_flags = flags;
1225 	args->uma_prev = prev_entry;
1226 	args->uma_start = start;
1227 	args->uma_size = size;
1228 	args->uma_uobj = uobj;
1229 	args->uma_uoffset = uoffset;
1230 
1231 	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
1232 	return 0;
1233 }
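
/*
 * Editor's note: uvm_map_prepare() and uvm_map_enter() form a two-phase
 * interface so callers can allocate resources between the phases.  A
 * sketch of the pairing, mirroring what uvm_map() does above:
 *
 *	struct uvm_map_args args;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset, 0,
 *	    flags, &args);			// locks map on success
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, NULL); // unlocks map
 */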
1234 
1235 /*
1236  * uvm_map_enter:
1237  *
1238  * called with map locked.
1239  * unlock the map before returning.
1240  */
1241 
1242 int
1243 uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1244     struct vm_map_entry *new_entry)
1245 {
1246 	struct vm_map_entry *prev_entry = args->uma_prev;
1247 	struct vm_map_entry *dead = NULL;
1248 
1249 	const uvm_flag_t flags = args->uma_flags;
1250 	const vm_prot_t prot = UVM_PROTECTION(flags);
1251 	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1252 	const vm_inherit_t inherit = UVM_INHERIT(flags);
1253 	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1254 	    AMAP_EXTEND_NOWAIT : 0;
1255 	const int advice = UVM_ADVICE(flags);
1256 
1257 	vaddr_t start = args->uma_start;
1258 	vsize_t size = args->uma_size;
1259 	struct uvm_object *uobj = args->uma_uobj;
1260 	voff_t uoffset = args->uma_uoffset;
1261 
1262 	const int kmap = (vm_map_pmap(map) == pmap_kernel());
1263 	int merged = 0;
1264 	int error;
1265 	int newetype;
1266 
1267 	UVMHIST_FUNC("uvm_map_enter");
1268 	UVMHIST_CALLED(maphist);
1269 
1270 	UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)",
1271 	    map, start, size, flags);
1272 	UVMHIST_LOG(maphist, "  uobj/offset %p/%ld", uobj, uoffset,0,0);
1273 
1274 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1275 	KASSERT(vm_map_locked_p(map));
1276 
1277 	if (uobj)
1278 		newetype = UVM_ET_OBJ;
1279 	else
1280 		newetype = 0;
1281 
1282 	if (flags & UVM_FLAG_COPYONW) {
1283 		newetype |= UVM_ET_COPYONWRITE;
1284 		if ((flags & UVM_FLAG_OVERLAY) == 0)
1285 			newetype |= UVM_ET_NEEDSCOPY;
1286 	}
1287 
1288 	/*
1289 	 * try and insert in map by extending previous entry, if possible.
1290 	 * XXX: we don't try and pull back the next entry.   might be useful
1291 	 * for a stack, but we are currently allocating our stack in advance.
1292 	 */
1293 
1294 	if (flags & UVM_FLAG_NOMERGE)
1295 		goto nomerge;
1296 
1297 	if (prev_entry->end == start &&
1298 	    prev_entry != &map->header &&
1299 	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
1300 	    prot, maxprot, inherit, advice, 0)) {
1301 
1302 		if (uobj && prev_entry->offset +
1303 		    (prev_entry->end - prev_entry->start) != uoffset)
1304 			goto forwardmerge;
1305 
1306 		/*
1307 		 * can't extend a shared amap.  note: no need to lock amap to
1308 		 * look at refs since we don't care about its exact value.
1309 		 * if it is one (i.e. we have the only reference) it will stay there.
1310 		 */
1311 
1312 		if (prev_entry->aref.ar_amap &&
1313 		    amap_refs(prev_entry->aref.ar_amap) != 1) {
1314 			goto forwardmerge;
1315 		}
1316 
1317 		if (prev_entry->aref.ar_amap) {
1318 			error = amap_extend(prev_entry, size,
1319 			    amapwaitflag | AMAP_EXTEND_FORWARDS);
1320 			if (error)
1321 				goto nomerge;
1322 		}
1323 
1324 		if (kmap) {
1325 			UVMMAP_EVCNT_INCR(kbackmerge);
1326 		} else {
1327 			UVMMAP_EVCNT_INCR(ubackmerge);
1328 		}
1329 		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);
1330 
1331 		/*
1332 		 * drop our reference to uobj since we are extending a reference
1333 		 * that we already have (the ref count can not drop to zero).
1334 		 */
1335 
1336 		if (uobj && uobj->pgops->pgo_detach)
1337 			uobj->pgops->pgo_detach(uobj);
1338 
1339 		/*
1340 		 * Now that we've merged the entries, note that we've grown
1341 		 * and our gap has shrunk.  Then fix the tree.
1342 		 */
1343 		prev_entry->end += size;
1344 		prev_entry->gap -= size;
1345 		uvm_rb_fixup(map, prev_entry);
1346 
1347 		uvm_map_check(map, "map backmerged");
1348 
1349 		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1350 		merged++;
1351 	}
1352 
1353 forwardmerge:
1354 	if (prev_entry->next->start == (start + size) &&
1355 	    prev_entry->next != &map->header &&
1356 	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
1357 	    prot, maxprot, inherit, advice, 0)) {
1358 
1359 		if (uobj && prev_entry->next->offset != uoffset + size)
1360 			goto nomerge;
1361 
1362 		/*
1363 		 * can't extend a shared amap.  note: no need to lock amap to
1364 		 * look at refs since we don't care about its exact value.
1365 		 * if it is one (i.e. we have the only reference) it will stay there.
1366 		 *
1367 		 * note that we also can't merge two amaps, so if we
1368 		 * merged with the previous entry which has an amap,
1369 		 * and the next entry also has an amap, we give up.
1370 		 *
1371 		 * Interesting cases:
1372 		 * amap, new, amap -> give up second merge (single fwd extend)
1373 		 * amap, new, none -> double forward extend (extend again here)
1374 		 * none, new, amap -> double backward extend (done here)
1375 		 * uobj, new, amap -> single backward extend (done here)
1376 		 *
1377 		 * XXX should we attempt to deal with someone refilling
1378 		 * the deallocated region between two entries that are
1379 		 * backed by the same amap (ie, arefs is 2, "prev" and
1380 		 * "next" refer to it, and adding this allocation will
1381 		 * close the hole, thus restoring arefs to 1 and
1382 		 * deallocating the "next" vm_map_entry)?  -- @@@
1383 		 */
1384 
1385 		if (prev_entry->next->aref.ar_amap &&
1386 		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1387 		     (merged && prev_entry->aref.ar_amap))) {
1388 			goto nomerge;
1389 		}
1390 
1391 		if (merged) {
1392 			/*
1393 			 * Try to extend the amap of the previous entry to
1394 			 * cover the next entry as well.  If it doesn't work
1395 			 * just skip on, don't actually give up, since we've
1396 			 * already completed the back merge.
1397 			 */
1398 			if (prev_entry->aref.ar_amap) {
1399 				if (amap_extend(prev_entry,
1400 				    prev_entry->next->end -
1401 				    prev_entry->next->start,
1402 				    amapwaitflag | AMAP_EXTEND_FORWARDS))
1403 					goto nomerge;
1404 			}
1405 
1406 			/*
1407 			 * Try to extend the amap of the *next* entry
1408 			 * back to cover the new allocation *and* the
1409 			 * previous entry as well (the previous merge
1410 			 * didn't have an amap already otherwise we
1411 			 * wouldn't be checking here for an amap).  If
1412 			 * it doesn't work just skip on, again, don't
1413 			 * actually give up, since we've already
1414 			 * completed the back merge.
1415 			 */
1416 			else if (prev_entry->next->aref.ar_amap) {
1417 				if (amap_extend(prev_entry->next,
1418 				    prev_entry->end -
1419 				    prev_entry->start,
1420 				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
1421 					goto nomerge;
1422 			}
1423 		} else {
1424 			/*
1425 			 * Pull the next entry's amap backwards to cover this
1426 			 * new allocation.
1427 			 */
1428 			if (prev_entry->next->aref.ar_amap) {
1429 				error = amap_extend(prev_entry->next, size,
1430 				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
1431 				if (error)
1432 					goto nomerge;
1433 			}
1434 		}
1435 
1436 		if (merged) {
1437 			if (kmap) {
1438 				UVMMAP_EVCNT_DECR(kbackmerge);
1439 				UVMMAP_EVCNT_INCR(kbimerge);
1440 			} else {
1441 				UVMMAP_EVCNT_DECR(ubackmerge);
1442 				UVMMAP_EVCNT_INCR(ubimerge);
1443 			}
1444 		} else {
1445 			if (kmap) {
1446 				UVMMAP_EVCNT_INCR(kforwmerge);
1447 			} else {
1448 				UVMMAP_EVCNT_INCR(uforwmerge);
1449 			}
1450 		}
1451 		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);
1452 
1453 		/*
1454 		 * drop our reference to uobj since we are extending a reference
1455 		 * that we already have (the ref count can not drop to zero).
1456 		 */
1457 		if (uobj && uobj->pgops->pgo_detach)
1458 			uobj->pgops->pgo_detach(uobj);
1459 
1460 		if (merged) {
1461 			dead = prev_entry->next;
1462 			prev_entry->end = dead->end;
1463 			uvm_map_entry_unlink(map, dead);
1464 			if (dead->aref.ar_amap != NULL) {
1465 				prev_entry->aref = dead->aref;
1466 				dead->aref.ar_amap = NULL;
1467 			}
1468 		} else {
1469 			prev_entry->next->start -= size;
1470 			if (prev_entry != &map->header) {
1471 				prev_entry->gap -= size;
1472 				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
1473 				uvm_rb_fixup(map, prev_entry);
1474 			}
1475 			if (uobj)
1476 				prev_entry->next->offset = uoffset;
1477 		}
1478 
1479 		uvm_map_check(map, "map forwardmerged");
1480 
1481 		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1482 		merged++;
1483 	}
1484 
1485 nomerge:
1486 	if (!merged) {
1487 		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
1488 		if (kmap) {
1489 			UVMMAP_EVCNT_INCR(knomerge);
1490 		} else {
1491 			UVMMAP_EVCNT_INCR(unomerge);
1492 		}
1493 
1494 		/*
1495 		 * allocate new entry and link it in.
1496 		 */
1497 
1498 		if (new_entry == NULL) {
1499 			new_entry = uvm_mapent_alloc(map,
1500 				(flags & UVM_FLAG_NOWAIT));
1501 			if (__predict_false(new_entry == NULL)) {
1502 				error = ENOMEM;
1503 				goto done;
1504 			}
1505 		}
1506 		new_entry->start = start;
1507 		new_entry->end = new_entry->start + size;
1508 		new_entry->object.uvm_obj = uobj;
1509 		new_entry->offset = uoffset;
1510 
1511 		new_entry->etype = newetype;
1512 
1513 		if (flags & UVM_FLAG_NOMERGE) {
1514 			new_entry->flags |= UVM_MAP_NOMERGE;
1515 		}
1516 
1517 		new_entry->protection = prot;
1518 		new_entry->max_protection = maxprot;
1519 		new_entry->inheritance = inherit;
1520 		new_entry->wired_count = 0;
1521 		new_entry->advice = advice;
1522 		if (flags & UVM_FLAG_OVERLAY) {
1523 
1524 			/*
1525 			 * to_add: for BSS we overallocate a little since we
1526 			 * are likely to extend
1527 			 */
1528 
1529 			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1530 				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1531 			struct vm_amap *amap = amap_alloc(size, to_add,
1532 			    (flags & UVM_FLAG_NOWAIT));
1533 			if (__predict_false(amap == NULL)) {
1534 				error = ENOMEM;
1535 				goto done;
1536 			}
1537 			new_entry->aref.ar_pageoff = 0;
1538 			new_entry->aref.ar_amap = amap;
1539 		} else {
1540 			new_entry->aref.ar_pageoff = 0;
1541 			new_entry->aref.ar_amap = NULL;
1542 		}
1543 		uvm_map_entry_link(map, prev_entry, new_entry);
1544 
1545 		/*
1546 		 * Update the free space hint
1547 		 */
1548 
1549 		if ((map->first_free == prev_entry) &&
1550 		    (prev_entry->end >= new_entry->start))
1551 			map->first_free = new_entry;
1552 
1553 		new_entry = NULL;
1554 	}
1555 
1556 	map->size += size;
1557 
1558 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1559 
1560 	error = 0;
1561 done:
1562 	vm_map_unlock(map);
1563 
1564 	if (new_entry) {
1565 		uvm_mapent_free(new_entry);
1566 	}
1567 
1568 	if (dead) {
1569 		KDASSERT(merged);
1570 		uvm_mapent_free(dead);
1571 	}
1572 
1573 	return error;
1574 }
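
/*
 * Editor's summary of the merge bookkeeping above: a new allocation
 * that exactly abuts a compatible neighbour is folded into it rather
 * than consuming a new entry:
 *
 *	[prev][new]		back merge:	prev->end += size
 *	[new][next]		forward merge:	next->start -= size
 *	[prev][new][next]	bimerge:	prev absorbs both,
 *						"next" is unlinked and freed
 *
 * incompatible neighbours (different protection, object, inheritance,
 * advice or wiring, a shared amap, or UVM_MAP_NOMERGE) leave the new
 * entry unmerged.
 */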
1575 
1576 /*
1577  * uvm_map_lookup_entry_bytree: lookup an entry in tree
1578  */
1579 
1580 static inline bool
1581 uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1582     struct vm_map_entry **entry	/* OUT */)
1583 {
1584 	struct vm_map_entry *prev = &map->header;
1585 	struct vm_map_entry *cur = ROOT_ENTRY(map);
1586 
1587 	while (cur) {
1588 		UVMMAP_EVCNT_INCR(mlk_treeloop);
1589 		if (address >= cur->start) {
1590 			if (address < cur->end) {
1591 				*entry = cur;
1592 				return true;
1593 			}
1594 			prev = cur;
1595 			cur = RIGHT_ENTRY(cur);
1596 		} else
1597 			cur = LEFT_ENTRY(cur);
1598 	}
1599 	*entry = prev;
1600 	return false;
1601 }
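
/*
 * Editor's note on the descent above: "prev" records the last entry at
 * which the search went right, so on a miss *entry is the nearest entry
 * below the address (or &map->header if there is none) -- the same
 * contract as the linear search in uvm_map_lookup_entry() below.
 */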
1602 
1603 /*
1604  * uvm_map_lookup_entry: find map entry at or before an address
1605  *
1606  * => map must at least be read-locked by caller
1607  * => entry is returned in "entry"
1608  * => return value is true if address is in the returned entry
1609  */
1610 
1611 bool
1612 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1613     struct vm_map_entry **entry	/* OUT */)
1614 {
1615 	struct vm_map_entry *cur;
1616 	bool use_tree = false;
1617 	UVMHIST_FUNC("uvm_map_lookup_entry");
1618 	UVMHIST_CALLED(maphist);
1619 
1620 	UVMHIST_LOG(maphist,"(map=%p,addr=%#lx,ent=%p)",
1621 	    map, address, entry, 0);
1622 
1623 	/*
1624 	 * start looking either from the head of the
1625 	 * list, or from the hint.
1626 	 */
1627 
1628 	cur = map->hint;
1629 
1630 	if (cur == &map->header)
1631 		cur = cur->next;
1632 
1633 	UVMMAP_EVCNT_INCR(mlk_call);
1634 	if (address >= cur->start) {
1635 
1636 		/*
1637 		 * go from hint to end of list.
1638 		 *
1639 		 * but first, make a quick check to see if
1640 		 * we are already looking at the entry we
1641 		 * want (which is usually the case).
1642 		 * note also that we don't need to save the hint
1643 		 * here... it is the same hint (unless we are
1644 		 * at the header, in which case the hint didn't
1645 		 * buy us anything anyway).
1646 		 */
1647 
1648 		if (cur != &map->header && cur->end > address) {
1649 			UVMMAP_EVCNT_INCR(mlk_hint);
1650 			*entry = cur;
1651 			UVMHIST_LOG(maphist,"<- got it via hint (%p)",
1652 			    cur, 0, 0, 0);
1653 			uvm_mapent_check(*entry);
1654 			return (true);
1655 		}
1656 
1657 		if (map->nentries > 15)
1658 			use_tree = true;
1659 	} else {
1660 
1661 		/*
1662 		 * invalid hint.  use tree.
1663 		 */
1664 		use_tree = true;
1665 	}
1666 
1667 	uvm_map_check(map, __func__);
1668 
1669 	if (use_tree) {
1670 		/*
1671 		 * Simple lookup in the tree.  Happens when the hint is
1672 		 * invalid, or nentries reaches a threshold.
1673 		 */
1674 		UVMMAP_EVCNT_INCR(mlk_tree);
1675 		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
1676 			goto got;
1677 		} else {
1678 			goto failed;
1679 		}
1680 	}
1681 
1682 	/*
1683 	 * search linearly
1684 	 */
1685 
1686 	UVMMAP_EVCNT_INCR(mlk_list);
1687 	while (cur != &map->header) {
1688 		UVMMAP_EVCNT_INCR(mlk_listloop);
1689 		if (cur->end > address) {
1690 			if (address >= cur->start) {
1691 				/*
1692 				 * save this lookup for future
1693 				 * hints, and return
1694 				 */
1695 
1696 				*entry = cur;
1697 got:
1698 				SAVE_HINT(map, map->hint, *entry);
1699 				UVMHIST_LOG(maphist,"<- search got it (%p)",
1700 					cur, 0, 0, 0);
1701 				KDASSERT((*entry)->start <= address);
1702 				KDASSERT(address < (*entry)->end);
1703 				uvm_mapent_check(*entry);
1704 				return (true);
1705 			}
1706 			break;
1707 		}
1708 		cur = cur->next;
1709 	}
1710 	*entry = cur->prev;
1711 failed:
1712 	SAVE_HINT(map, map->hint, *entry);
1713 	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1714 	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1715 	KDASSERT((*entry)->next == &map->header ||
1716 	    address < (*entry)->next->start);
1717 	return (false);
1718 }
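
/*
 * Editor's sketch of typical use (the caller provides the locking):
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		// va lies inside [entry->start, entry->end)
 *	} else {
 *		// entry is the entry before va, or &map->header
 *	}
 *	vm_map_unlock_read(map);
 */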
1719 
1720 /*
1721  * See if the range between start and start + length fits in the gap
1722  * between entry->end and entry->next->start.  Returns 1 if it fits,
1723  * 0 if it doesn't fit, and -1 if the address wraps around.
1724  */
1725 static int
1726 uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1727     vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
1728 {
1729 	vaddr_t end;
1730 
1731 #ifdef PMAP_PREFER
1732 	/*
1733 	 * push start address forward as needed to avoid VAC alias problems.
1734 	 * we only do this if a valid offset is specified.
1735 	 */
1736 
1737 	if (uoffset != UVM_UNKNOWN_OFFSET)
1738 		PMAP_PREFER(uoffset, start, length, topdown);
1739 #endif
1740 	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
1741 		KASSERT(align < uvmexp.ncolors);
1742 		if (uvmexp.ncolors > 1) {
1743 			const u_int colormask = uvmexp.colormask;
1744 			const u_int colorsize = colormask + 1;
1745 			vaddr_t hint = atop(*start);
1746 			const u_int color = hint & colormask;
1747 			if (color != align) {
1748 				hint -= color;	/* adjust to color boundary */
1749 				KASSERT((hint & colormask) == 0);
1750 				if (topdown) {
1751 					if (align > color)
1752 						hint -= colorsize;
1753 				} else {
1754 					if (align < color)
1755 						hint += colorsize;
1756 				}
1757 				*start = ptoa(hint + align); /* adjust to color */
1758 			}
1759 		}
1760 	} else if (align != 0) {
1761 		if ((*start & (align - 1)) != 0) {
1762 			if (topdown)
1763 				*start &= ~(align - 1);
1764 			else
1765 				*start = roundup(*start, align);
1766 		}
1767 		/*
1768 		 * XXX Should we PMAP_PREFER() here again?
1769 		 * eh...i think we're okay
1770 		 */
1771 	}
1772 
1773 	/*
1774 	 * Find the end of the proposed new region.  Be sure we didn't
1775 	 * wrap around the address; if so, we lose.  Otherwise, if the
1776 	 * proposed new region fits before the next entry, we win.
1777 	 */
1778 
1779 	end = *start + length;
1780 	if (end < *start)
1781 		return (-1);
1782 
1783 	if (entry->next->start >= end && *start >= entry->end)
1784 		return (1);
1785 
1786 	return (0);
1787 }
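
/*
 * Editor's worked example of the UVM_FLAG_COLORMATCH arithmetic above,
 * assuming uvmexp.ncolors == 4 (colormask == 3), a bottom-up map, and
 * a desired color ("align") of 1:
 *
 *	hint = atop(*start) = 0x13	-> color = 0x13 & 3 = 3
 *	hint -= color			-> 0x10 (color boundary)
 *	align (1) < color (3)		-> hint += colorsize (4) = 0x14
 *	*start = ptoa(0x14 + 1)		-> page 0x15, which has color 1
 *
 * i.e. the start address only ever moves forward for bottom-up maps
 * (and backward for top-down ones), preserving the search direction.
 */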
1788 
1789 /*
1790  * uvm_map_findspace: find "length" sized space in "map".
1791  *
1792  * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1793  *	set in "flags" (in which case we insist on using "hint").
1794  * => "result" is VA returned
1795  * => uobj/uoffset are to be used to handle VAC alignment, if required
1796  * => if "align" is non-zero, we attempt to align to that value.
1797  * => caller must at least have read-locked map
1798  * => returns NULL on failure, or pointer to prev. map entry if success
1799  * => note this is a cross between the old vm_map_findspace and vm_map_find
1800  */
1801 
1802 struct vm_map_entry *
1803 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1804     vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1805     vsize_t align, int flags)
1806 {
1807 	struct vm_map_entry *entry;
1808 	struct vm_map_entry *child, *prev, *tmp;
1809 	vaddr_t orig_hint __diagused;
1810 	const int topdown = map->flags & VM_MAP_TOPDOWN;
1811 	UVMHIST_FUNC("uvm_map_findspace");
1812 	UVMHIST_CALLED(maphist);
1813 
1814 	UVMHIST_LOG(maphist, "(map=%p, hint=%#lx, len=%lu, flags=%#x)",
1815 	    map, hint, length, flags);
1816 	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0);
1817 	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
1818 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1819 
1820 	uvm_map_check(map, "map_findspace entry");
1821 
1822 	/*
1823 	 * remember the original hint.  if we are aligning, then we
1824 	 * may have to try again with no alignment constraint if
1825 	 * we fail the first time.
1826 	 */
1827 
1828 	orig_hint = hint;
1829 	if (hint < vm_map_min(map)) {	/* check ranges ... */
1830 		if (flags & UVM_FLAG_FIXED) {
1831 			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1832 			return (NULL);
1833 		}
1834 		hint = vm_map_min(map);
1835 	}
1836 	if (hint > vm_map_max(map)) {
1837 		UVMHIST_LOG(maphist,"<- VA %#lx > range [%#lx->%#lx]",
1838 		    hint, vm_map_min(map), vm_map_max(map), 0);
1839 		return (NULL);
1840 	}
1841 
1842 	/*
1843 	 * Look for the first possible address; if there's already
1844 	 * something at this address, we have to start after it.
1845 	 */
1846 
1847 	/*
1848 	 * @@@: there are four, no, eight cases to consider.
1849 	 *
1850 	 * 0: found,     fixed,     bottom up -> fail
1851 	 * 1: found,     fixed,     top down  -> fail
1852 	 * 2: found,     not fixed, bottom up -> start after entry->end,
1853 	 *                                       loop up
1854 	 * 3: found,     not fixed, top down  -> start before entry->start,
1855 	 *                                       loop down
1856 	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
1857 	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
1858 	 * 6: not found, not fixed, bottom up -> check entry->next->start,
1859 	 *                                       loop up
1860 	 * 7: not found, not fixed, top down  -> check entry->next->start,
1861 	 *                                       loop down
1862 	 *
1863 	 * as you can see, it reduces to roughly five cases, and that
1864 	 * adding top down mapping only adds one unique case (without
1865 	 * it, there would be four cases).
1866 	 */
1867 
1868 	if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1869 		entry = map->first_free;
1870 	} else {
1871 		if (uvm_map_lookup_entry(map, hint, &entry)) {
1872 			/* "hint" address already in use ... */
1873 			if (flags & UVM_FLAG_FIXED) {
1874 				UVMHIST_LOG(maphist, "<- fixed & VA in use",
1875 				    0, 0, 0, 0);
1876 				return (NULL);
1877 			}
1878 			if (topdown)
1879 				/* Start from lower gap. */
1880 				entry = entry->prev;
1881 		} else if (flags & UVM_FLAG_FIXED) {
1882 			if (entry->next->start >= hint + length &&
1883 			    hint + length > hint)
1884 				goto found;
1885 
1886 			/* "hint" address is gap but too small */
1887 			UVMHIST_LOG(maphist, "<- fixed mapping failed",
1888 			    0, 0, 0, 0);
1889 			return (NULL); /* only one shot at it ... */
1890 		} else {
1891 			/*
1892 			 * See if given hint fits in this gap.
1893 			 */
1894 			switch (uvm_map_space_avail(&hint, length,
1895 			    uoffset, align, flags, topdown, entry)) {
1896 			case 1:
1897 				goto found;
1898 			case -1:
1899 				goto wraparound;
1900 			}
1901 
1902 			if (topdown) {
1903 				/*
1904 				 * Still there is a chance to fit
1905 				 * if hint > entry->end.
1906 				 */
1907 			} else {
1908 				/* Start from higher gap. */
1909 				entry = entry->next;
1910 				if (entry == &map->header)
1911 					goto notfound;
1912 				goto nextgap;
1913 			}
1914 		}
1915 	}
1916 
1917 	/*
1918 	 * Note that the UVM_FLAG_FIXED case has already been handled.
1919 	 */
1920 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1921 
1922 	/* Try to find the space in the red-black tree */
1923 
1924 	/* Check slot before any entry */
1925 	hint = topdown ? entry->next->start - length : entry->end;
1926 	switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
1927 	    topdown, entry)) {
1928 	case 1:
1929 		goto found;
1930 	case -1:
1931 		goto wraparound;
1932 	}
1933 
1934 nextgap:
1935 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1936 	/* If there is not enough space in the whole tree, we fail */
1937 	tmp = ROOT_ENTRY(map);
1938 	if (tmp == NULL || tmp->maxgap < length)
1939 		goto notfound;
1940 
1941 	prev = NULL; /* previous candidate */
1942 
1943 	/* Find an entry close to hint that has enough space */
1944 	for (; tmp;) {
1945 		KASSERT(tmp->next->start == tmp->end + tmp->gap);
1946 		if (topdown) {
1947 			if (tmp->next->start < hint + length &&
1948 			    (prev == NULL || tmp->end > prev->end)) {
1949 				if (tmp->gap >= length)
1950 					prev = tmp;
1951 				else if ((child = LEFT_ENTRY(tmp)) != NULL
1952 				    && child->maxgap >= length)
1953 					prev = tmp;
1954 			}
1955 		} else {
1956 			if (tmp->end >= hint &&
1957 			    (prev == NULL || tmp->end < prev->end)) {
1958 				if (tmp->gap >= length)
1959 					prev = tmp;
1960 				else if ((child = RIGHT_ENTRY(tmp)) != NULL
1961 				    && child->maxgap >= length)
1962 					prev = tmp;
1963 			}
1964 		}
1965 		if (tmp->next->start < hint + length)
1966 			child = RIGHT_ENTRY(tmp);
1967 		else if (tmp->end > hint)
1968 			child = LEFT_ENTRY(tmp);
1969 		else {
1970 			if (tmp->gap >= length)
1971 				break;
1972 			if (topdown)
1973 				child = LEFT_ENTRY(tmp);
1974 			else
1975 				child = RIGHT_ENTRY(tmp);
1976 		}
1977 		if (child == NULL || child->maxgap < length)
1978 			break;
1979 		tmp = child;
1980 	}
1981 
1982 	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1983 		/*
1984 		 * Check if the entry that we found satisfies the
1985 		 * space requirement.
1986 		 */
1987 		if (topdown) {
1988 			if (hint > tmp->next->start - length)
1989 				hint = tmp->next->start - length;
1990 		} else {
1991 			if (hint < tmp->end)
1992 				hint = tmp->end;
1993 		}
1994 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
1995 		    flags, topdown, tmp)) {
1996 		case 1:
1997 			entry = tmp;
1998 			goto found;
1999 		case -1:
2000 			goto wraparound;
2001 		}
2002 		if (tmp->gap >= length)
2003 			goto listsearch;
2004 	}
2005 	if (prev == NULL)
2006 		goto notfound;
2007 
2008 	if (topdown) {
2009 		KASSERT(orig_hint >= prev->next->start - length ||
2010 		    prev->next->start - length > prev->next->start);
2011 		hint = prev->next->start - length;
2012 	} else {
2013 		KASSERT(orig_hint <= prev->end);
2014 		hint = prev->end;
2015 	}
2016 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
2017 	    flags, topdown, prev)) {
2018 	case 1:
2019 		entry = prev;
2020 		goto found;
2021 	case -1:
2022 		goto wraparound;
2023 	}
2024 	if (prev->gap >= length)
2025 		goto listsearch;
2026 
2027 	if (topdown)
2028 		tmp = LEFT_ENTRY(prev);
2029 	else
2030 		tmp = RIGHT_ENTRY(prev);
2031 	for (;;) {
2032 		KASSERT(tmp && tmp->maxgap >= length);
2033 		if (topdown)
2034 			child = RIGHT_ENTRY(tmp);
2035 		else
2036 			child = LEFT_ENTRY(tmp);
2037 		if (child && child->maxgap >= length) {
2038 			tmp = child;
2039 			continue;
2040 		}
2041 		if (tmp->gap >= length)
2042 			break;
2043 		if (topdown)
2044 			tmp = LEFT_ENTRY(tmp);
2045 		else
2046 			tmp = RIGHT_ENTRY(tmp);
2047 	}
2048 
2049 	if (topdown) {
2050 		KASSERT(orig_hint >= tmp->next->start - length ||
2051 		    tmp->next->start - length > tmp->next->start);
2052 		hint = tmp->next->start - length;
2053 	} else {
2054 		KASSERT(orig_hint <= tmp->end);
2055 		hint = tmp->end;
2056 	}
2057 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
2058 	    flags, topdown, tmp)) {
2059 	case 1:
2060 		entry = tmp;
2061 		goto found;
2062 	case -1:
2063 		goto wraparound;
2064 	}
2065 
2066 	/*
2067 	 * The tree fails to find an entry because of offset or alignment
2068 	 * restrictions.  Search the list instead.
2069 	 */
2070  listsearch:
2071 	/*
2072 	 * Look through the rest of the map, trying to fit a new region in
2073 	 * the gap between existing regions, or after the very last region.
2074 	 * note: entry->end = base VA of current gap,
2075 	 *	 entry->next->start = VA of end of current gap
2076 	 */
2077 
2078 	for (;;) {
2079 		/* Update hint for current gap. */
2080 		hint = topdown ? entry->next->start - length : entry->end;
2081 
2082 		/* See if it fits. */
2083 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
2084 		    flags, topdown, entry)) {
2085 		case 1:
2086 			goto found;
2087 		case -1:
2088 			goto wraparound;
2089 		}
2090 
2091 		/* Advance to next/previous gap */
2092 		if (topdown) {
2093 			if (entry == &map->header) {
2094 				UVMHIST_LOG(maphist, "<- failed (off start)",
2095 				    0,0,0,0);
2096 				goto notfound;
2097 			}
2098 			entry = entry->prev;
2099 		} else {
2100 			entry = entry->next;
2101 			if (entry == &map->header) {
2102 				UVMHIST_LOG(maphist, "<- failed (off end)",
2103 				    0,0,0,0);
2104 				goto notfound;
2105 			}
2106 		}
2107 	}
2108 
2109  found:
2110 	SAVE_HINT(map, map->hint, entry);
2111 	*result = hint;
2112 	UVMHIST_LOG(maphist,"<- got it!  (result=%#lx)", hint, 0,0,0);
2113 	KASSERT(topdown || hint >= orig_hint);
2114 	KASSERT(!topdown || hint <= orig_hint);
2115 	KASSERT(entry->end <= hint);
2116 	KASSERT(hint + length <= entry->next->start);
2117 	return (entry);
2118 
2119  wraparound:
2120 	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2121 
2122 	return (NULL);
2123 
2124  notfound:
2125 	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2126 
2127 	return (NULL);
2128 }
2129 
2130 /*
2131  *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
2132  */
2133 
2134 /*
2135  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2136  *
2137  * => caller must check alignment and size
2138  * => map must be locked by caller
2139  * => we return a list of map entries that we've removed from the map
2140  *    in "entry_list"
2141  */
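
/*
 * Illustrative sketch (an assumption, not original source): the usual
 * two-phase pattern removes the entries with the map write-locked, then
 * drops the object references with the map unlocked:
 *
 *	struct vm_map_entry *dead;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead, 0);
 *	vm_map_unlock(map);
 *	if (dead != NULL)
 *		uvm_unmap_detach(dead, 0);
 */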
2142 
2143 void
2144 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
2145     struct vm_map_entry **entry_list /* OUT */, int flags)
2146 {
2147 	struct vm_map_entry *entry, *first_entry, *next;
2148 	vaddr_t len;
2149 	UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
2150 
2151 	UVMHIST_LOG(maphist,"(map=%p, start=%#lx, end=%#lx)",
2152 	    map, start, end, 0);
2153 	VM_MAP_RANGE_CHECK(map, start, end);
2154 
2155 	uvm_map_check(map, "unmap_remove entry");
2156 
2157 	/*
2158 	 * find first entry
2159 	 */
2160 
2161 	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
2162 		/* clip and go... */
2163 		entry = first_entry;
2164 		UVM_MAP_CLIP_START(map, entry, start);
2165 		/* critical!  prevents stale hint */
2166 		SAVE_HINT(map, entry, entry->prev);
2167 	} else {
2168 		entry = first_entry->next;
2169 	}
2170 
2171 	/*
2172 	 * Save the free space hint
2173 	 */
2174 
2175 	if (map->first_free != &map->header && map->first_free->start >= start)
2176 		map->first_free = entry->prev;
2177 
2178 	/*
2179 	 * note: we now re-use first_entry for a different task.  we remove
2180 	 * a number of map entries from the map and save them in a linked
2181 	 * list headed by "first_entry".  once we remove them from the map
2182 	 * the caller should unlock the map and drop the references to the
2183 	 * backing objects [cf. uvm_unmap_detach].  the goal is to
2184 	 * separate unmapping from reference dropping.  why?
2185 	 *   [1] the map has to be locked for unmapping
2186 	 *   [2] the map need not be locked for reference dropping
2187 	 *   [3] dropping references may trigger pager I/O, and if we hit
2188 	 *       a pager that does synchronous I/O we may have to wait for it.
2189 	 *   [4] we would like all waiting for I/O to occur with maps unlocked
2190 	 *       so that we don't block other threads.
2191 	 */
2192 
2193 	first_entry = NULL;
2194 	*entry_list = NULL;
2195 
2196 	/*
2197 	 * break up the area into map entry sized regions and unmap.  note
2198 	 * that all mappings have to be removed before we can even consider
2199 	 * dropping references to amaps or VM objects (otherwise we could end
2200 	 * up with a mapping to a page on the free list which would be very bad)
2201 	 */
2202 
2203 	while ((entry != &map->header) && (entry->start < end)) {
2204 		KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
2205 
2206 		UVM_MAP_CLIP_END(map, entry, end);
2207 		next = entry->next;
2208 		len = entry->end - entry->start;
2209 
2210 		/*
2211 		 * unwire before removing addresses from the pmap; otherwise
2212 		 * unwiring will put the entries back into the pmap (XXX).
2213 		 */
2214 
2215 		if (VM_MAPENT_ISWIRED(entry)) {
2216 			uvm_map_entry_unwire(map, entry);
2217 		}
2218 		if (flags & UVM_FLAG_VAONLY) {
2219 
2220 			/* nothing */
2221 
2222 		} else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
2223 
2224 			/*
2225 			 * if the map is non-pageable, any pages mapped there
2226 			 * must be wired and entered with pmap_kenter_pa(),
2227 			 * and we should free any such pages immediately.
2228 			 * this is mostly used for kmem_map.
2229 			 */
2230 			KASSERT(vm_map_pmap(map) == pmap_kernel());
2231 
2232 			uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
2233 		} else if (UVM_ET_ISOBJ(entry) &&
2234 			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
2235 			panic("%s: kernel object %p %p\n",
2236 			    __func__, map, entry);
2237 		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
2238 			/*
2239 			 * remove mappings the standard way.  lock object
2240 			 * and/or amap to ensure vm_page state does not
2241 			 * change while in pmap_remove().
2242 			 */
2243 
2244 			uvm_map_lock_entry(entry);
2245 			pmap_remove(map->pmap, entry->start, entry->end);
2246 			uvm_map_unlock_entry(entry);
2247 		}
2248 
2249 #if defined(UVMDEBUG)
2250 		/*
2251 		 * check if there are any remaining mappings,
2252 		 * which would indicate a bug in the caller.
2253 		 */
2254 
2255 		vaddr_t va;
2256 		for (va = entry->start; va < entry->end;
2257 		    va += PAGE_SIZE) {
2258 			if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2259 				panic("%s: %#"PRIxVADDR" has mapping",
2260 				    __func__, va);
2261 			}
2262 		}
2263 
2264 		if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
2265 			uvm_km_check_empty(map, entry->start,
2266 			    entry->end);
2267 		}
2268 #endif /* defined(UVMDEBUG) */
2269 
2270 		/*
2271 		 * remove entry from map and put it on our list of entries
2272 		 * that we've nuked.  then go to next entry.
2273 		 */
2274 
2275 		UVMHIST_LOG(maphist, "  removed map entry %p", entry, 0, 0,0);
2276 
2277 		/* critical!  prevents stale hint */
2278 		SAVE_HINT(map, entry, entry->prev);
2279 
2280 		uvm_map_entry_unlink(map, entry);
2281 		KASSERT(map->size >= len);
2282 		map->size -= len;
2283 		entry->prev = NULL;
2284 		entry->next = first_entry;
2285 		first_entry = entry;
2286 		entry = next;
2287 	}
2288 
2289 	/*
2290 	 * Note: if map is dying, leave pmap_update() for pmap_destroy(),
2291 	 * which will be called later.
2292 	 */
2293 	if ((map->flags & VM_MAP_DYING) == 0) {
2294 		pmap_update(vm_map_pmap(map));
2295 	} else {
2296 		KASSERT(vm_map_pmap(map) != pmap_kernel());
2297 	}
2298 
2299 	uvm_map_check(map, "unmap_remove leave");
2300 
2301 	/*
2302 	 * now we've cleaned up the map and are ready for the caller to drop
2303 	 * references to the mapped objects.
2304 	 */
2305 
2306 	*entry_list = first_entry;
2307 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
2308 
2309 	if (map->flags & VM_MAP_WANTVA) {
2310 		mutex_enter(&map->misc_lock);
2311 		map->flags &= ~VM_MAP_WANTVA;
2312 		cv_broadcast(&map->cv);
2313 		mutex_exit(&map->misc_lock);
2314 	}
2315 }
2316 
2317 /*
2318  * uvm_unmap_detach: drop references in a chain of map entries
2319  *
2320  * => we will free the map entries as we traverse the list.
2321  */
2322 
2323 void
2324 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
2325 {
2326 	struct vm_map_entry *next_entry;
2327 	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
2328 
2329 	while (first_entry) {
2330 		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
2331 		UVMHIST_LOG(maphist,
2332 		    "  detach %p: amap=%p, obj=%p, submap?=%d",
2333 		    first_entry, first_entry->aref.ar_amap,
2334 		    first_entry->object.uvm_obj,
2335 		    UVM_ET_ISSUBMAP(first_entry));
2336 
2337 		/*
2338 		 * drop reference to amap, if we've got one
2339 		 */
2340 
2341 		if (first_entry->aref.ar_amap)
2342 			uvm_map_unreference_amap(first_entry, flags);
2343 
2344 		/*
2345 		 * drop reference to our backing object, if we've got one
2346 		 */
2347 
2348 		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2349 		if (UVM_ET_ISOBJ(first_entry) &&
2350 		    first_entry->object.uvm_obj->pgops->pgo_detach) {
2351 			(*first_entry->object.uvm_obj->pgops->pgo_detach)
2352 				(first_entry->object.uvm_obj);
2353 		}
2354 		next_entry = first_entry->next;
2355 		uvm_mapent_free(first_entry);
2356 		first_entry = next_entry;
2357 	}
2358 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
2359 }
2360 
2361 /*
2362  *   E X T R A C T I O N   F U N C T I O N S
2363  */
2364 
2365 /*
2366  * uvm_map_reserve: reserve space in a vm_map for future use.
2367  *
2368  * => we reserve space in a map by putting a dummy map entry in the
2369  *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2370  * => map should be unlocked (we will write lock it)
2371  * => we return true if we were able to reserve space
2372  * => XXXCDC: should be inline?
2373  */
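
/*
 * Illustrative sketch (arguments are assumptions): reserving one page
 * of VA at no particular address, with *raddr doubling as the hint:
 *
 *	vaddr_t va = vm_map_min(map);
 *
 *	if (!uvm_map_reserve(map, PAGE_SIZE, 0, 0, &va, 0))
 *		return ENOMEM;
 *
 * on success the blank entry at va has prot VM_PROT_NONE and no
 * backing object or amap.
 */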
2374 
2375 int
2376 uvm_map_reserve(struct vm_map *map, vsize_t size,
2377     vaddr_t offset	/* hint for pmap_prefer */,
2378     vsize_t align	/* alignment */,
2379     vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
2380     uvm_flag_t flags	/* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
2381 {
2382 	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
2383 
2384 	UVMHIST_LOG(maphist, "(map=%p, size=%#lx, offset=%#lx, addr=%p)",
2385 	    map,size,offset,raddr);
2386 
2387 	size = round_page(size);
2388 
2389 	/*
2390 	 * reserve some virtual space.
2391 	 */
2392 
2393 	if (uvm_map(map, raddr, size, NULL, offset, align,
2394 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
2395 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
2396 		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
2397 		return (false);
2398 	}
2399 
2400 	UVMHIST_LOG(maphist, "<- done (*raddr=%#lx)", *raddr,0,0,0);
2401 	return (true);
2402 }
2403 
2404 /*
2405  * uvm_map_replace: replace a reserved (blank) area of memory with
2406  * real mappings.
2407  *
2408  * => caller must WRITE-LOCK the map
2409  * => we return true if replacement was a success
2410  * => we expect the newents chain to have nnewents entries on it and
2411  *    we expect newents->prev to point to the last entry on the list
2412  * => note newents is allowed to be NULL
2413  */
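
/*
 * Illustrative note (not in the original): for nnewents == 3 the
 * expected chain shape is
 *
 *	newents == e1,  e1->next == e2,  e2->next == e3,  e3->next == NULL
 *	newents->prev == e3			(tail shortcut)
 *
 * where e1..e3 cover [start, end) in ascending order and nsize is the
 * sum of their lengths.
 */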
2414 
2415 static int
2416 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2417     struct vm_map_entry *newents, int nnewents, vsize_t nsize,
2418     struct vm_map_entry **oldentryp)
2419 {
2420 	struct vm_map_entry *oldent, *last;
2421 
2422 	uvm_map_check(map, "map_replace entry");
2423 
2424 	/*
2425 	 * first find the blank map entry at the specified address
2426 	 */
2427 
2428 	if (!uvm_map_lookup_entry(map, start, &oldent)) {
2429 		return (false);
2430 	}
2431 
2432 	/*
2433 	 * check to make sure we have a proper blank entry
2434 	 */
2435 
2436 	if (end < oldent->end) {
2437 		UVM_MAP_CLIP_END(map, oldent, end);
2438 	}
2439 	if (oldent->start != start || oldent->end != end ||
2440 	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
2441 		return (false);
2442 	}
2443 
2444 #ifdef DIAGNOSTIC
2445 
2446 	/*
2447 	 * sanity check the newents chain
2448 	 */
2449 
2450 	{
2451 		struct vm_map_entry *tmpent = newents;
2452 		int nent = 0;
2453 		vsize_t sz = 0;
2454 		vaddr_t cur = start;
2455 
2456 		while (tmpent) {
2457 			nent++;
2458 			sz += tmpent->end - tmpent->start;
2459 			if (tmpent->start < cur)
2460 				panic("uvm_map_replace1");
2461 			if (tmpent->start >= tmpent->end || tmpent->end > end) {
2462 				panic("uvm_map_replace2: "
2463 				    "tmpent->start=%#"PRIxVADDR
2464 				    ", tmpent->end=%#"PRIxVADDR
2465 				    ", end=%#"PRIxVADDR,
2466 				    tmpent->start, tmpent->end, end);
2467 			}
2468 			cur = tmpent->end;
2469 			if (tmpent->next) {
2470 				if (tmpent->next->prev != tmpent)
2471 					panic("uvm_map_replace3");
2472 			} else {
2473 				if (newents->prev != tmpent)
2474 					panic("uvm_map_replace4");
2475 			}
2476 			tmpent = tmpent->next;
2477 		}
2478 		if (nent != nnewents)
2479 			panic("uvm_map_replace5");
2480 		if (sz != nsize)
2481 			panic("uvm_map_replace6");
2482 	}
2483 #endif
2484 
2485 	/*
2486 	 * map entry is a valid blank!   replace it.   (this does all the
2487 	 * work of map entry link/unlink...).
2488 	 */
2489 
2490 	if (newents) {
2491 		last = newents->prev;
2492 
2493 		/* critical: flush stale hints out of map */
2494 		SAVE_HINT(map, map->hint, newents);
2495 		if (map->first_free == oldent)
2496 			map->first_free = last;
2497 
2498 		last->next = oldent->next;
2499 		last->next->prev = last;
2500 
2501 		/* Fix RB tree */
2502 		uvm_rb_remove(map, oldent);
2503 
2504 		newents->prev = oldent->prev;
2505 		newents->prev->next = newents;
2506 		map->nentries = map->nentries + (nnewents - 1);
2507 
2508 		/* Fixup the RB tree */
2509 		{
2510 			int i;
2511 			struct vm_map_entry *tmp;
2512 
2513 			tmp = newents;
2514 			for (i = 0; i < nnewents && tmp; i++) {
2515 				uvm_rb_insert(map, tmp);
2516 				tmp = tmp->next;
2517 			}
2518 		}
2519 	} else {
2520 		/* NULL list of new entries: just remove the old one */
2521 		clear_hints(map, oldent);
2522 		uvm_map_entry_unlink(map, oldent);
2523 	}
2524 	map->size -= end - start - nsize;
2525 
2526 	uvm_map_check(map, "map_replace leave");
2527 
2528 	/*
2529 	 * now we can free the old blank entry and return.
2530 	 */
2531 
2532 	*oldentryp = oldent;
2533 	return (true);
2534 }
2535 
2536 /*
2537  * uvm_map_extract: extract a mapping from a map and put it somewhere
2538  *	(maybe removing the old mapping)
2539  *
2540  * => maps should be unlocked (we will write lock them)
2541  * => returns 0 on success, error code otherwise
2542  * => start must be page aligned
2543  * => len must be page sized
2544  * => flags:
2545  *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
2546  *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2547  *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2548  *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2549  *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2550  *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2551  *             be used from within the kernel in a kernel level map <<<
2552  */
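
/*
 * Illustrative sketch (an assumption): moving a page-aligned range out
 * of srcmap and into dstmap, tearing down the source mapping:
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE);
 *
 * per the notes above, REMOVE may not be combined with CONTIG or QREF.
 */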
2553 
2554 int
2555 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2556     struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
2557 {
2558 	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
2559 	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2560 	    *deadentry, *oldentry;
2561 	struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
2562 	vsize_t elen __unused;
2563 	int nchain, error, copy_ok;
2564 	vsize_t nsize;
2565 	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
2566 
2567 	UVMHIST_LOG(maphist,"(srcmap=%p,start=%#lx, len=%#lx", srcmap, start,
2568 	    len,0);
2569 	UVMHIST_LOG(maphist," ...,dstmap=%p, flags=%#x)", dstmap,flags,0,0);
2570 
2571 	/*
2572 	 * step 0: sanity check: start must be on a page boundary, length
2573 	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
2574 	 * REMOVE.
2575 	 */
2576 
2577 	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2578 	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2579 		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
2580 
2581 	/*
2582 	 * step 1: reserve space in the target map for the extracted area
2583 	 */
2584 
2585 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2586 		dstaddr = vm_map_min(dstmap);
2587 		if (!uvm_map_reserve(dstmap, len, start,
2588 		    atop(start) & uvmexp.colormask, &dstaddr,
2589 		    UVM_FLAG_COLORMATCH))
2590 			return (ENOMEM);
2591 		KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
2592 		*dstaddrp = dstaddr;	/* pass address back to caller */
2593 		UVMHIST_LOG(maphist, "  dstaddr=%#lx", dstaddr,0,0,0);
2594 	} else {
2595 		dstaddr = *dstaddrp;
2596 	}
2597 
2598 	/*
2599 	 * step 2: setup for the extraction process loop by init'ing the
2600 	 * map entry chain, locking src map, and looking up the first useful
2601 	 * entry in the map.
2602 	 */
2603 
2604 	end = start + len;
2605 	newend = dstaddr + len;
2606 	chain = endchain = NULL;
2607 	nchain = 0;
2608 	nsize = 0;
2609 	vm_map_lock(srcmap);
2610 
2611 	if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2612 
2613 		/* "start" is within an entry */
2614 		if (flags & UVM_EXTRACT_QREF) {
2615 
2616 			/*
2617 			 * for quick references we don't clip the entry, so
2618 			 * the entry may map space "before" the starting
2619 			 * virtual address... this is the "fudge" factor
2620 			 * (which can be non-zero only the first time
2621 			 * through the "while" loop in step 3).
2622 			 */
2623 
2624 			fudge = start - entry->start;
2625 		} else {
2626 
2627 			/*
2628 			 * normal reference: we clip the map to fit (thus
2629 			 * fudge is zero)
2630 			 */
2631 
2632 			UVM_MAP_CLIP_START(srcmap, entry, start);
2633 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
2634 			fudge = 0;
2635 		}
2636 	} else {
2637 
2638 		/* "start" is not within an entry ... skip to next entry */
2639 		if (flags & UVM_EXTRACT_CONTIG) {
2640 			error = EINVAL;
2641 			goto bad;    /* definite hole here ... */
2642 		}
2643 
2644 		entry = entry->next;
2645 		fudge = 0;
2646 	}
2647 
2648 	/* save values from srcmap for step 6 */
2649 	orig_entry = entry;
2650 	orig_fudge = fudge;
2651 
2652 	/*
2653 	 * step 3: now start looping through the map entries, extracting
2654 	 * as we go.
2655 	 */
2656 
2657 	while (entry->start < end && entry != &srcmap->header) {
2658 
2659 		/* if we are not doing a quick reference, clip it */
2660 		if ((flags & UVM_EXTRACT_QREF) == 0)
2661 			UVM_MAP_CLIP_END(srcmap, entry, end);
2662 
2663 		/* clear needs_copy (allow chunking) */
2664 		if (UVM_ET_ISNEEDSCOPY(entry)) {
2665 			amap_copy(srcmap, entry,
2666 			    AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
2667 			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
2668 				error = ENOMEM;
2669 				goto bad;
2670 			}
2671 
2672 			/* amap_copy could clip (during chunk)!  update fudge */
2673 			if (fudge) {
2674 				fudge = start - entry->start;
2675 				orig_fudge = fudge;
2676 			}
2677 		}
2678 
2679 		/* calculate the offset of this from "start" */
2680 		oldoffset = (entry->start + fudge) - start;
2681 
2682 		/* allocate a new map entry */
2683 		newentry = uvm_mapent_alloc(dstmap, 0);
2684 		if (newentry == NULL) {
2685 			error = ENOMEM;
2686 			goto bad;
2687 		}
2688 
2689 		/* set up new map entry */
2690 		newentry->next = NULL;
2691 		newentry->prev = endchain;
2692 		newentry->start = dstaddr + oldoffset;
2693 		newentry->end =
2694 		    newentry->start + (entry->end - (entry->start + fudge));
2695 		if (newentry->end > newend || newentry->end < newentry->start)
2696 			newentry->end = newend;
2697 		newentry->object.uvm_obj = entry->object.uvm_obj;
2698 		if (newentry->object.uvm_obj) {
2699 			if (newentry->object.uvm_obj->pgops->pgo_reference)
2700 				newentry->object.uvm_obj->pgops->
2701 				    pgo_reference(newentry->object.uvm_obj);
2702 			newentry->offset = entry->offset + fudge;
2703 		} else {
2704 			newentry->offset = 0;
2705 		}
2706 		newentry->etype = entry->etype;
2707 		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2708 			entry->max_protection : entry->protection;
2709 		newentry->max_protection = entry->max_protection;
2710 		newentry->inheritance = entry->inheritance;
2711 		newentry->wired_count = 0;
2712 		newentry->aref.ar_amap = entry->aref.ar_amap;
2713 		if (newentry->aref.ar_amap) {
2714 			newentry->aref.ar_pageoff =
2715 			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
2716 			uvm_map_reference_amap(newentry, AMAP_SHARED |
2717 			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2718 		} else {
2719 			newentry->aref.ar_pageoff = 0;
2720 		}
2721 		newentry->advice = entry->advice;
2722 		if ((flags & UVM_EXTRACT_QREF) != 0) {
2723 			newentry->flags |= UVM_MAP_NOMERGE;
2724 		}
2725 
2726 		/* now link it on the chain */
2727 		nchain++;
2728 		nsize += newentry->end - newentry->start;
2729 		if (endchain == NULL) {
2730 			chain = endchain = newentry;
2731 		} else {
2732 			endchain->next = newentry;
2733 			endchain = newentry;
2734 		}
2735 
2736 		/* end of 'while' loop! */
2737 		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
2738 		    (entry->next == &srcmap->header ||
2739 		    entry->next->start != entry->end)) {
2740 			error = EINVAL;
2741 			goto bad;
2742 		}
2743 		entry = entry->next;
2744 		fudge = 0;
2745 	}
2746 
2747 	/*
2748 	 * step 4: close off chain (in format expected by uvm_map_replace)
2749 	 */
2750 
2751 	if (chain)
2752 		chain->prev = endchain;
2753 
2754 	/*
2755 	 * step 5: attempt to lock the dest map so we can pmap_copy.
2756 	 * note usage of copy_ok:
2757 	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2758 	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2759 	 */
2760 
2761 	if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
2762 		copy_ok = 1;
2763 		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2764 		    nchain, nsize, &resentry)) {
2765 			if (srcmap != dstmap)
2766 				vm_map_unlock(dstmap);
2767 			error = EIO;
2768 			goto bad;
2769 		}
2770 	} else {
2771 		copy_ok = 0;
2772 		/* replace deferred until step 7 */
2773 	}
2774 
2775 	/*
2776 	 * step 6: traverse the srcmap a second time to do the following:
2777 	 *  - if we got a lock on the dstmap do pmap_copy
2778 	 *  - if UVM_EXTRACT_REMOVE remove the entries
2779 	 * we make use of orig_entry and orig_fudge (saved in step 2)
2780 	 */
2781 
2782 	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2783 
2784 		/* purge possible stale hints from srcmap */
2785 		if (flags & UVM_EXTRACT_REMOVE) {
2786 			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
2787 			if (srcmap->first_free != &srcmap->header &&
2788 			    srcmap->first_free->start >= start)
2789 				srcmap->first_free = orig_entry->prev;
2790 		}
2791 
2792 		entry = orig_entry;
2793 		fudge = orig_fudge;
2794 		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */
2795 
2796 		while (entry->start < end && entry != &srcmap->header) {
2797 			if (copy_ok) {
2798 				oldoffset = (entry->start + fudge) - start;
2799 				elen = MIN(end, entry->end) -
2800 				    (entry->start + fudge);
2801 				pmap_copy(dstmap->pmap, srcmap->pmap,
2802 				    dstaddr + oldoffset, elen,
2803 				    entry->start + fudge);
2804 			}
2805 
2806 			/* we advance "entry" in the following if statement */
2807 			if (flags & UVM_EXTRACT_REMOVE) {
2808 				uvm_map_lock_entry(entry);
2809 				pmap_remove(srcmap->pmap, entry->start,
2810 						entry->end);
2811 				uvm_map_unlock_entry(entry);
2812 				oldentry = entry;	/* save entry */
2813 				entry = entry->next;	/* advance */
2814 				uvm_map_entry_unlink(srcmap, oldentry);
2815 							/* add to dead list */
2816 				oldentry->next = deadentry;
2817 				deadentry = oldentry;
2818 			} else {
2819 				entry = entry->next;		/* advance */
2820 			}
2821 
2822 			/* end of 'while' loop */
2823 			fudge = 0;
2824 		}
2825 		pmap_update(srcmap->pmap);
2826 
2827 		/*
2828 		 * unlock dstmap.  we will dispose of deadentry in
2829 		 * step 7 if needed
2830 		 */
2831 
2832 		if (copy_ok && srcmap != dstmap)
2833 			vm_map_unlock(dstmap);
2834 
2835 	} else {
2836 		deadentry = NULL;
2837 	}
2838 
2839 	/*
2840 	 * step 7: we are done with the source map, unlock.   if copy_ok
2841 	 * is 0 then we have not replaced the dummy mapping in dstmap yet
2842 	 * and we need to do so now.
2843 	 */
2844 
2845 	vm_map_unlock(srcmap);
2846 	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2847 		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
2848 
2849 	/* now do the replacement if we didn't do it in step 5 */
2850 	if (copy_ok == 0) {
2851 		vm_map_lock(dstmap);
2852 		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2853 		    nchain, nsize, &resentry);
2854 		vm_map_unlock(dstmap);
2855 
2856 		if (error == false) {
2857 			error = EIO;
2858 			goto bad2;
2859 		}
2860 	}
2861 
2862 	if (resentry != NULL)
2863 		uvm_mapent_free(resentry);
2864 
2865 	return (0);
2866 
2867 	/*
2868 	 * bad: failure recovery
2869 	 */
2870 bad:
2871 	vm_map_unlock(srcmap);
2872 bad2:			/* src already unlocked */
2873 	if (chain)
2874 		uvm_unmap_detach(chain,
2875 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2876 
2877 	if (resentry != NULL)
2878 		uvm_mapent_free(resentry);
2879 
2880 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2881 		uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
2882 	}
2883 	return (error);
2884 }
2885 
2886 /* end of extraction functions */
2887 
2888 /*
2889  * uvm_map_submap: punch down part of a map into a submap
2890  *
2891  * => only the kernel_map is allowed to be submapped
2892  * => the purpose of submapping is to break up the locking granularity
2893  *	of a larger map
2894  * => the range specified must have been mapped previously with a uvm_map()
2895  *	call [with uobj==NULL] to create a blank map entry in the main map.
2896  *	[And it had better still be blank!]
2897  * => maps which contain submaps should never be copied or forked.
2898  * => to remove a submap, use uvm_unmap() on the main map
2899  *	and then uvm_map_deallocate() the submap.
2900  * => main map must be unlocked.
2901  * => submap must have been init'd and have a zero reference count.
2902  *	[need not be locked as we don't actually reference it]
2903  */
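
/*
 * Illustrative sketch (hypothetical sizes/names): the usual sequence is
 * to create a blank entry with uvm_map() and then punch the submap in:
 *
 *	vaddr_t va = 0;
 *
 *	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) == 0)
 *		error = uvm_map_submap(kernel_map, va, va + size, submap);
 */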
2904 
2905 int
2906 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2907     struct vm_map *submap)
2908 {
2909 	struct vm_map_entry *entry;
2910 	int error;
2911 
2912 	vm_map_lock(map);
2913 	VM_MAP_RANGE_CHECK(map, start, end);
2914 
2915 	if (uvm_map_lookup_entry(map, start, &entry)) {
2916 		UVM_MAP_CLIP_START(map, entry, start);
2917 		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
2918 	} else {
2919 		entry = NULL;
2920 	}
2921 
2922 	if (entry != NULL &&
2923 	    entry->start == start && entry->end == end &&
2924 	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2925 	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2926 		entry->etype |= UVM_ET_SUBMAP;
2927 		entry->object.sub_map = submap;
2928 		entry->offset = 0;
2929 		uvm_map_reference(submap);
2930 		error = 0;
2931 	} else {
2932 		error = EINVAL;
2933 	}
2934 	vm_map_unlock(map);
2935 
2936 	return error;
2937 }
2938 
2939 /*
2940  * uvm_map_protect: change map protection
2941  *
2942  * => set_max means set max_protection.
2943  * => map must be unlocked.
2944  */
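
/*
 * Illustrative sketch (an mprotect(2)-style use; arguments assumed):
 *
 *	error = uvm_map_protect(&p->p_vmspace->vm_map, va, va + size,
 *	    VM_PROT_READ, false);
 *
 * with set_max false only the current protection changes; with true,
 * max_protection is set to new_prot and the current protection is
 * clamped to (new_prot & old protection).
 */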
2945 
2946 #define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
2947 			 ~VM_PROT_WRITE : VM_PROT_ALL)
2948 
2949 int
2950 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
2951     vm_prot_t new_prot, bool set_max)
2952 {
2953 	struct vm_map_entry *current, *entry;
2954 	int error = 0;
2955 	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2956 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_prot=%#x)",
2957 		    map, start, end, new_prot);
2958 
2959 	vm_map_lock(map);
2960 	VM_MAP_RANGE_CHECK(map, start, end);
2961 	if (uvm_map_lookup_entry(map, start, &entry)) {
2962 		UVM_MAP_CLIP_START(map, entry, start);
2963 	} else {
2964 		entry = entry->next;
2965 	}
2966 
2967 	/*
2968 	 * make a first pass to check for protection violations.
2969 	 */
2970 
2971 	current = entry;
2972 	while ((current != &map->header) && (current->start < end)) {
2973 		if (UVM_ET_ISSUBMAP(current)) {
2974 			error = EINVAL;
2975 			goto out;
2976 		}
2977 		if ((new_prot & current->max_protection) != new_prot) {
2978 			error = EACCES;
2979 			goto out;
2980 		}
2981 		/*
2982 		 * Don't allow VM_PROT_EXECUTE to be set on entries that
2983 		 * point to vnodes that are associated with a NOEXEC file
2984 		 * system.
2985 		 */
2986 		if (UVM_ET_ISOBJ(current) &&
2987 		    UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
2988 			struct vnode *vp =
2989 			    (struct vnode *) current->object.uvm_obj;
2990 
2991 			if ((new_prot & VM_PROT_EXECUTE) != 0 &&
2992 			    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
2993 				error = EACCES;
2994 				goto out;
2995 			}
2996 		}
2997 
2998 		current = current->next;
2999 	}
3000 
3001 	/* go back and fix up protections (no need to clip this time). */
3002 
3003 	current = entry;
3004 	while ((current != &map->header) && (current->start < end)) {
3005 		vm_prot_t old_prot;
3006 
3007 		UVM_MAP_CLIP_END(map, current, end);
3008 		old_prot = current->protection;
3009 		if (set_max)
3010 			current->protection =
3011 			    (current->max_protection = new_prot) & old_prot;
3012 		else
3013 			current->protection = new_prot;
3014 
3015 		/*
3016 		 * update physical map if necessary.  worry about copy-on-write
3017 		 * here -- CHECK THIS XXX
3018 		 */
3019 
3020 		if (current->protection != old_prot) {
3021 			/* update pmap! */
3022 			uvm_map_lock_entry(current);
3023 			pmap_protect(map->pmap, current->start, current->end,
3024 			    current->protection & MASK(current));
3025 			uvm_map_unlock_entry(current);
3026 
3027 			/*
3028 			 * If this entry points at a vnode, and the
3029 			 * protection includes VM_PROT_EXECUTE, mark
3030 			 * the vnode as VEXECMAP.
3031 			 */
3032 			if (UVM_ET_ISOBJ(current)) {
3033 				struct uvm_object *uobj =
3034 				    current->object.uvm_obj;
3035 
3036 				if (UVM_OBJ_IS_VNODE(uobj) &&
3037 				    (current->protection & VM_PROT_EXECUTE)) {
3038 					vn_markexec((struct vnode *) uobj);
3039 				}
3040 			}
3041 		}
3042 
3043 		/*
3044 		 * If the map is configured to lock any future mappings,
3045 		 * wire this entry now if the old protection was VM_PROT_NONE
3046 		 * and the new protection is not VM_PROT_NONE.
3047 		 */
3048 
3049 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3050 		    VM_MAPENT_ISWIRED(current) == 0 &&
3051 		    old_prot == VM_PROT_NONE &&
3052 		    new_prot != VM_PROT_NONE) {
3053 			if (uvm_map_pageable(map, current->start,
3054 			    current->end, false,
3055 			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
3056 
3057 				/*
3058 				 * If locking the entry fails, remember the
3059 				 * error if it's the first one.  Note we
3060 				 * still continue setting the protection in
3061 				 * the map, but will return the error
3062 				 * condition regardless.
3063 				 *
3064 				 * XXX Ignore what the actual error is,
3065 				 * XXX just call it a resource shortage
3066 				 * XXX so that it doesn't get confused
3067 				 * XXX what uvm_map_protect() itself would
3068 				 * XXX normally return.
3069 				 */
3070 
3071 				error = ENOMEM;
3072 			}
3073 		}
3074 		current = current->next;
3075 	}
3076 	pmap_update(map->pmap);
3077 
3078  out:
3079 	vm_map_unlock(map);
3080 
3081 	UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3082 	return error;
3083 }
3084 
3085 #undef  MASK
3086 
3087 /*
3088  * uvm_map_inherit: set inheritance code for range of addrs in map.
3089  *
3090  * => map must be unlocked
3091  * => note that the inherit code is used during a "fork".  see fork
3092  *	code for details.
3093  */
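
/*
 * Illustrative sketch (a minherit(2)-style use; arguments assumed):
 * mark a range to be shared with children created by fork:
 *
 *	error = uvm_map_inherit(&p->p_vmspace->vm_map, va, va + size,
 *	    MAP_INHERIT_SHARE);
 */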
3094 
3095 int
3096 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3097     vm_inherit_t new_inheritance)
3098 {
3099 	struct vm_map_entry *entry, *temp_entry;
3100 	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3101 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_inh=%#x)",
3102 	    map, start, end, new_inheritance);
3103 
3104 	switch (new_inheritance) {
3105 	case MAP_INHERIT_NONE:
3106 	case MAP_INHERIT_COPY:
3107 	case MAP_INHERIT_SHARE:
3108 	case MAP_INHERIT_ZERO:
3109 		break;
3110 	default:
3111 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3112 		return EINVAL;
3113 	}
3114 
3115 	vm_map_lock(map);
3116 	VM_MAP_RANGE_CHECK(map, start, end);
3117 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3118 		entry = temp_entry;
3119 		UVM_MAP_CLIP_START(map, entry, start);
3120 	} else {
3121 		entry = temp_entry->next;
3122 	}
3123 	while ((entry != &map->header) && (entry->start < end)) {
3124 		UVM_MAP_CLIP_END(map, entry, end);
3125 		entry->inheritance = new_inheritance;
3126 		entry = entry->next;
3127 	}
3128 	vm_map_unlock(map);
3129 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3130 	return 0;
3131 }
3132 
3133 /*
3134  * uvm_map_advice: set advice code for range of addrs in map.
3135  *
3136  * => map must be unlocked
3137  */
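
/*
 * Illustrative sketch (a madvise(2)-style use; arguments assumed):
 *
 *	error = uvm_map_advice(&p->p_vmspace->vm_map, va, va + size,
 *	    MADV_SEQUENTIAL);
 */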
3138 
3139 int
3140 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
3141 {
3142 	struct vm_map_entry *entry, *temp_entry;
3143 	UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3144 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_adv=%#x)",
3145 	    map, start, end, new_advice);
3146 
3147 	vm_map_lock(map);
3148 	VM_MAP_RANGE_CHECK(map, start, end);
3149 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3150 		entry = temp_entry;
3151 		UVM_MAP_CLIP_START(map, entry, start);
3152 	} else {
3153 		entry = temp_entry->next;
3154 	}
3155 
3156 	/*
3157 	 * XXXJRT: disallow holes?
3158 	 */
3159 
3160 	while ((entry != &map->header) && (entry->start < end)) {
3161 		UVM_MAP_CLIP_END(map, entry, end);
3162 
3163 		switch (new_advice) {
3164 		case MADV_NORMAL:
3165 		case MADV_RANDOM:
3166 		case MADV_SEQUENTIAL:
3167 			/* nothing special here */
3168 			break;
3169 
3170 		default:
3171 			vm_map_unlock(map);
3172 			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
3173 			return EINVAL;
3174 		}
3175 		entry->advice = new_advice;
3176 		entry = entry->next;
3177 	}
3178 
3179 	vm_map_unlock(map);
3180 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3181 	return 0;
3182 }
3183 
3184 /*
3185  * uvm_map_willneed: apply MADV_WILLNEED
3186  */
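
/*
 * Illustrative sketch (arguments assumed): the MADV_WILLNEED path of
 * madvise(2) reduces to a call like
 *
 *	error = uvm_map_willneed(&p->p_vmspace->vm_map, va, va + size);
 *
 * which starts read-ahead of the backing uobj pages for the range.
 */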
3187 
3188 int
3189 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
3190 {
3191 	struct vm_map_entry *entry;
3192 	UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
3193 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx)",
3194 	    map, start, end, 0);
3195 
3196 	vm_map_lock_read(map);
3197 	VM_MAP_RANGE_CHECK(map, start, end);
3198 	if (!uvm_map_lookup_entry(map, start, &entry)) {
3199 		entry = entry->next;
3200 	}
3201 	while (entry->start < end) {
3202 		struct vm_amap * const amap = entry->aref.ar_amap;
3203 		struct uvm_object * const uobj = entry->object.uvm_obj;
3204 
3205 		KASSERT(entry != &map->header);
3206 		KASSERT(start < entry->end);
3207 		/*
3208 		 * For now, we handle only the easy but commonly-requested
3209 		 * case, i.e. starting prefetch of the backing uobj pages.
3210 		 *
3211 		 * XXX It might be useful to pmap_enter() the already-in-core
3212 		 * pages by inventing a "weak" mode for uvm_fault() which would
3213 		 * only do the PGO_LOCKED pgo_get().
3214 		 */
3215 		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
3216 			off_t offset;
3217 			off_t size;
3218 
3219 			offset = entry->offset;
3220 			if (start < entry->start) {
3221 				offset += entry->start - start;
3222 			}
3223 			size = entry->offset + (entry->end - entry->start);
3224 			if (entry->end < end) {
3225 				size -= end - entry->end;
3226 			}
3227 			uvm_readahead(uobj, offset, size);
3228 		}
3229 		entry = entry->next;
3230 	}
3231 	vm_map_unlock_read(map);
3232 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
3233 	return 0;
3234 }
3235 
3236 /*
3237  * uvm_map_pageable: sets the pageability of a range in a map.
3238  *
3239  * => wires map entries.  should not be used for transient page locking.
3240  *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
3241  * => regions specified as not pageable require lock-down (wired) memory
3242  *	and page tables.
3243  * => map must never be read-locked
3244  * => if islocked is true, map is already write-locked
3245  * => we always unlock the map, since we must downgrade to a read-lock
3246  *	to call uvm_fault_wire()
3247  * => XXXCDC: check this and try and clean it up.
3248  */
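
/*
 * Illustrative sketch (mlock(2)/munlock(2)-style uses; arguments
 * assumed):
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, va, va + size,
 *	    false, 0);			wires the range
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, va, va + size,
 *	    true, 0);			unwires it again
 */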
3249 
3250 int
3251 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
3252     bool new_pageable, int lockflags)
3253 {
3254 	struct vm_map_entry *entry, *start_entry, *failed_entry;
3255 	int rv;
3256 #ifdef DIAGNOSTIC
3257 	u_int timestamp_save;
3258 #endif
3259 	UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3260 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_pageable=%u)",
3261 		    map, start, end, new_pageable);
3262 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3263 
3264 	if ((lockflags & UVM_LK_ENTER) == 0)
3265 		vm_map_lock(map);
3266 	VM_MAP_RANGE_CHECK(map, start, end);
3267 
3268 	/*
3269 	 * only one pageability change may take place at one time, since
3270 	 * uvm_fault_wire assumes it will be called only once for each
3271 	 * wiring/unwiring.  therefore, we have to make sure we're actually
3272 	 * changing the pageability for the entire region.  we do so before
3273 	 * making any changes.
3274 	 */
3275 
3276 	if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
3277 		if ((lockflags & UVM_LK_EXIT) == 0)
3278 			vm_map_unlock(map);
3279 
3280 		UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3281 		return EFAULT;
3282 	}
3283 	entry = start_entry;
3284 
3285 	/*
3286 	 * handle wiring and unwiring separately.
3287 	 */
3288 
3289 	if (new_pageable) {		/* unwire */
3290 		UVM_MAP_CLIP_START(map, entry, start);
3291 
3292 		/*
3293 		 * unwiring.  first ensure that the range to be unwired is
3294 		 * really wired down and that there are no holes.
3295 		 */
3296 
3297 		while ((entry != &map->header) && (entry->start < end)) {
3298 			if (entry->wired_count == 0 ||
3299 			    (entry->end < end &&
3300 			     (entry->next == &map->header ||
3301 			      entry->next->start > entry->end))) {
3302 				if ((lockflags & UVM_LK_EXIT) == 0)
3303 					vm_map_unlock(map);
3304 				UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3305 				return EINVAL;
3306 			}
3307 			entry = entry->next;
3308 		}
3309 
3310 		/*
3311 		 * POSIX 1003.1b - a single munlock call unlocks a region,
3312 		 * regardless of the number of mlock calls made on that
3313 		 * region.
3314 		 */
3315 
3316 		entry = start_entry;
3317 		while ((entry != &map->header) && (entry->start < end)) {
3318 			UVM_MAP_CLIP_END(map, entry, end);
3319 			if (VM_MAPENT_ISWIRED(entry))
3320 				uvm_map_entry_unwire(map, entry);
3321 			entry = entry->next;
3322 		}
3323 		if ((lockflags & UVM_LK_EXIT) == 0)
3324 			vm_map_unlock(map);
3325 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3326 		return 0;
3327 	}
3328 
3329 	/*
3330 	 * wire case: in two passes [XXXCDC: ugly block of code here]
3331 	 *
3332 	 * 1: holding the write lock, we create any anonymous maps that need
3333 	 *    to be created.  then we clip each map entry to the region to
3334 	 *    be wired and increment its wiring count.
3335 	 *
3336 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
3337 	 *    in the pages for any newly wired area (wired_count == 1).
3338 	 *
3339 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3340 	 *    deadlock with another thread that may have faulted on one of
3341 	 *    the pages to be wired (it would mark the page busy, blocking
3342 	 *    us, then in turn block on the map lock that we hold).  because
3343 	 *    of problems in the recursive lock package, we cannot upgrade
3344 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3345 	 *    require the write lock must be done beforehand.  because we
3346 	 *    keep the read lock on the map, the copy-on-write status of the
3347 	 *    entries we modify here cannot change.
3348 	 */
3349 
3350 	while ((entry != &map->header) && (entry->start < end)) {
3351 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3352 
3353 			/*
3354 			 * perform actions of vm_map_lookup that need the
3355 			 * write lock on the map: create an anonymous map
3356 			 * for a copy-on-write region, or an anonymous map
3357 			 * for a zero-fill region.  (XXXCDC: submap case
3358 			 * ok?)
3359 			 */
3360 
3361 			if (!UVM_ET_ISSUBMAP(entry)) {  /* not submap */
3362 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3363 				    ((entry->max_protection & VM_PROT_WRITE) ||
3364 				     (entry->object.uvm_obj == NULL))) {
3365 					amap_copy(map, entry, 0, start, end);
3366 					/* XXXCDC: wait OK? */
3367 				}
3368 			}
3369 		}
3370 		UVM_MAP_CLIP_START(map, entry, start);
3371 		UVM_MAP_CLIP_END(map, entry, end);
3372 		entry->wired_count++;
3373 
3374 		/*
3375 		 * Check for holes
3376 		 */
3377 
3378 		if (entry->protection == VM_PROT_NONE ||
3379 		    (entry->end < end &&
3380 		     (entry->next == &map->header ||
3381 		      entry->next->start > entry->end))) {
3382 
3383 			/*
3384 			 * found one.  amap creation actions do not need to
3385 			 * be undone, but the wired counts need to be restored.
3386 			 */
3387 
3388 			while (entry != &map->header && entry->end > start) {
3389 				entry->wired_count--;
3390 				entry = entry->prev;
3391 			}
3392 			if ((lockflags & UVM_LK_EXIT) == 0)
3393 				vm_map_unlock(map);
3394 			UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
3395 			return EINVAL;
3396 		}
3397 		entry = entry->next;
3398 	}
3399 
3400 	/*
3401 	 * Pass 2.
3402 	 */
3403 
3404 #ifdef DIAGNOSTIC
3405 	timestamp_save = map->timestamp;
3406 #endif
3407 	vm_map_busy(map);
3408 	vm_map_unlock(map);
3409 
3410 	rv = 0;
3411 	entry = start_entry;
3412 	while (entry != &map->header && entry->start < end) {
3413 		if (entry->wired_count == 1) {
3414 			rv = uvm_fault_wire(map, entry->start, entry->end,
3415 			    entry->max_protection, 1);
3416 			if (rv) {
3417 
3418 				/*
3419 				 * wiring failed.  break out of the loop.
3420 				 * we'll clean up the map below, once we
3421 				 * have a write lock again.
3422 				 */
3423 
3424 				break;
3425 			}
3426 		}
3427 		entry = entry->next;
3428 	}
3429 
3430 	if (rv) {	/* failed? */
3431 
3432 		/*
3433 		 * Get back to an exclusive (write) lock.
3434 		 */
3435 
3436 		vm_map_lock(map);
3437 		vm_map_unbusy(map);
3438 
3439 #ifdef DIAGNOSTIC
3440 		if (timestamp_save + 1 != map->timestamp)
3441 			panic("uvm_map_pageable: stale map");
3442 #endif
3443 
3444 		/*
3445 		 * first drop the wiring count on all the entries
3446 		 * which haven't actually been wired yet.
3447 		 */
3448 
3449 		failed_entry = entry;
3450 		while (entry != &map->header && entry->start < end) {
3451 			entry->wired_count--;
3452 			entry = entry->next;
3453 		}
3454 
3455 		/*
3456 		 * now, unwire all the entries that were successfully
3457 		 * wired above.
3458 		 */
3459 
3460 		entry = start_entry;
3461 		while (entry != failed_entry) {
3462 			entry->wired_count--;
3463 			if (VM_MAPENT_ISWIRED(entry) == 0)
3464 				uvm_map_entry_unwire(map, entry);
3465 			entry = entry->next;
3466 		}
3467 		if ((lockflags & UVM_LK_EXIT) == 0)
3468 			vm_map_unlock(map);
3469 		UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
3470 		return (rv);
3471 	}
3472 
3473 	if ((lockflags & UVM_LK_EXIT) == 0) {
3474 		vm_map_unbusy(map);
3475 	} else {
3476 
3477 		/*
3478 		 * Get back to an exclusive (write) lock.
3479 		 */
3480 
3481 		vm_map_lock(map);
3482 		vm_map_unbusy(map);
3483 	}
3484 
3485 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3486 	return 0;
3487 }
3488 
3489 /*
3490  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3491  * all mapped regions.
3492  *
3493  * => map must not be locked.
3494  * => if no flags are specified, all regions are unwired.
3495  * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3496  */
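
/*
 * Illustrative sketch (an mlockall(2)-style use; the limit argument is
 * an assumption):
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
 *
 * passing flags == 0 unwires everything and clears VM_MAP_WIREFUTURE.
 */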
3497 
3498 int
3499 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
3500 {
3501 	struct vm_map_entry *entry, *failed_entry;
3502 	vsize_t size;
3503 	int rv;
3504 #ifdef DIAGNOSTIC
3505 	u_int timestamp_save;
3506 #endif
3507 	UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3508 	UVMHIST_LOG(maphist,"(map=%p,flags=%#x)", map, flags, 0, 0);
3509 
3510 	KASSERT(map->flags & VM_MAP_PAGEABLE);
3511 
3512 	vm_map_lock(map);
3513 
3514 	/*
3515 	 * handle wiring and unwiring separately.
3516 	 */
3517 
3518 	if (flags == 0) {			/* unwire */
3519 
3520 		/*
3521 		 * POSIX 1003.1b -- munlockall unlocks all regions,
3522 		 * regardless of how many times mlockall has been called.
3523 		 */
3524 
3525 		for (entry = map->header.next; entry != &map->header;
3526 		     entry = entry->next) {
3527 			if (VM_MAPENT_ISWIRED(entry))
3528 				uvm_map_entry_unwire(map, entry);
3529 		}
3530 		map->flags &= ~VM_MAP_WIREFUTURE;
3531 		vm_map_unlock(map);
3532 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
3533 		return 0;
3534 	}
3535 
3536 	if (flags & MCL_FUTURE) {
3537 
3538 		/*
3539 		 * must wire all future mappings; remember this.
3540 		 */
3541 
3542 		map->flags |= VM_MAP_WIREFUTURE;
3543 	}
3544 
3545 	if ((flags & MCL_CURRENT) == 0) {
3546 
3547 		/*
3548 		 * no more work to do!
3549 		 */
3550 
3551 		UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3552 		vm_map_unlock(map);
3553 		return 0;
3554 	}
3555 
3556 	/*
3557 	 * wire case: in three passes [XXXCDC: ugly block of code here]
3558 	 *
3559 	 * 1: holding the write lock, count all pages mapped by non-wired
3560 	 *    entries.  if this would cause us to go over our limit, we fail.
3561 	 *
3562 	 * 2: still holding the write lock, we create any anonymous maps that
3563 	 *    need to be created.  then we increment each entry's wiring count.
3564 	 *
3565 	 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
3566 	 *    in the pages for any newly wired area (wired_count == 1).
3567 	 *
3568 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
3569 	 *    deadlock with another thread that may have faulted on one of
3570 	 *    the pages to be wired (it would mark the page busy, blocking
3571 	 *    us, then in turn block on the map lock that we hold).  because
3572 	 *    of problems in the recursive lock package, we cannot upgrade
3573 	 *    to a write lock in vm_map_lookup.  thus, any actions that
3574 	 *    require the write lock must be done beforehand.  because we
3575 	 *    keep the read lock on the map, the copy-on-write status of the
3576 	 *    entries we modify here cannot change.
3577 	 */
3578 
3579 	for (size = 0, entry = map->header.next; entry != &map->header;
3580 	     entry = entry->next) {
3581 		if (entry->protection != VM_PROT_NONE &&
3582 		    VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3583 			size += entry->end - entry->start;
3584 		}
3585 	}
3586 
3587 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3588 		vm_map_unlock(map);
3589 		return ENOMEM;
3590 	}
3591 
3592 	if (limit != 0 &&
3593 	    (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3594 		vm_map_unlock(map);
3595 		return ENOMEM;
3596 	}
3597 
3598 	/*
3599 	 * Pass 2.
3600 	 */
3601 
3602 	for (entry = map->header.next; entry != &map->header;
3603 	     entry = entry->next) {
3604 		if (entry->protection == VM_PROT_NONE)
3605 			continue;
3606 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
3607 
3608 			/*
3609 			 * perform actions of vm_map_lookup that need the
3610 			 * write lock on the map: create an anonymous map
3611 			 * for a copy-on-write region, or an anonymous map
3612 			 * for a zero-fill region.  (XXXCDC: submap case
3613 			 * ok?)
3614 			 */
3615 
3616 			if (!UVM_ET_ISSUBMAP(entry)) {	/* not submap */
3617 				if (UVM_ET_ISNEEDSCOPY(entry) &&
3618 				    ((entry->max_protection & VM_PROT_WRITE) ||
3619 				     (entry->object.uvm_obj == NULL))) {
3620 					amap_copy(map, entry, 0, entry->start,
3621 					    entry->end);
3622 					/* XXXCDC: wait OK? */
3623 				}
3624 			}
3625 		}
3626 		entry->wired_count++;
3627 	}
3628 
3629 	/*
3630 	 * Pass 3.
3631 	 */
3632 
3633 #ifdef DIAGNOSTIC
3634 	timestamp_save = map->timestamp;
3635 #endif
3636 	vm_map_busy(map);
3637 	vm_map_unlock(map);
3638 
3639 	rv = 0;
3640 	for (entry = map->header.next; entry != &map->header;
3641 	     entry = entry->next) {
3642 		if (entry->wired_count == 1) {
3643 			rv = uvm_fault_wire(map, entry->start, entry->end,
3644 			    entry->max_protection, 1);
3645 			if (rv) {
3646 
3647 				/*
3648 				 * wiring failed.  break out of the loop.
3649 				 * we'll clean up the map below, once we
3650 				 * have a write lock again.
3651 				 */
3652 
3653 				break;
3654 			}
3655 		}
3656 	}
3657 
3658 	if (rv) {
3659 
3660 		/*
3661 		 * Get back an exclusive (write) lock.
3662 		 */
3663 
3664 		vm_map_lock(map);
3665 		vm_map_unbusy(map);
3666 
3667 #ifdef DIAGNOSTIC
3668 		if (timestamp_save + 1 != map->timestamp)
3669 			panic("uvm_map_pageable_all: stale map");
3670 #endif
3671 
3672 		/*
3673 		 * first drop the wiring count on all the entries
3674 		 * which haven't actually been wired yet.
3675 		 *
3676 		 * Skip VM_PROT_NONE entries like we did above.
3677 		 */
3678 
3679 		failed_entry = entry;
3680 		for (/* nothing */; entry != &map->header;
3681 		     entry = entry->next) {
3682 			if (entry->protection == VM_PROT_NONE)
3683 				continue;
3684 			entry->wired_count--;
3685 		}
3686 
3687 		/*
3688 		 * now, unwire all the entries that were successfully
3689 		 * wired above.
3690 		 *
3691 		 * Skip VM_PROT_NONE entries like we did above.
3692 		 */
3693 
3694 		for (entry = map->header.next; entry != failed_entry;
3695 		     entry = entry->next) {
3696 			if (entry->protection == VM_PROT_NONE)
3697 				continue;
3698 			entry->wired_count--;
3699 			if (VM_MAPENT_ISWIRED(entry))
3700 				uvm_map_entry_unwire(map, entry);
3701 		}
3702 		vm_map_unlock(map);
3703 		UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3704 		return (rv);
3705 	}
3706 
3707 	vm_map_unbusy(map);
3708 
3709 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
3710 	return 0;
3711 }
3712 
3713 /*
3714  * uvm_map_clean: clean out a map range
3715  *
3716  * => valid flags:
3717  *   if (flags & PGO_CLEANIT): dirty pages are cleaned first
3718  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
3719  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3720  *   if (flags & PGO_FREE): any cached pages are freed after clean
3721  * => returns an error if any part of the specified range isn't mapped
3722  * => never a need to flush amap layer since the anonymous memory has
3723  *	no permanent home, but may deactivate pages there
3724  * => called from sys_msync() and sys_madvise()
3725  * => caller must not write-lock map (read OK).
3726  * => we may sleep while cleaning if SYNCIO [with map read-locked]
3727  */
3728 
3729 int
3730 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
3731 {
3732 	struct vm_map_entry *current, *entry;
3733 	struct uvm_object *uobj;
3734 	struct vm_amap *amap;
3735 	struct vm_anon *anon, *anon_tofree;
3736 	struct vm_page *pg;
3737 	vaddr_t offset;
3738 	vsize_t size;
3739 	voff_t uoff;
3740 	int error, refs;
3741 	UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
3742 
3743 	UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,flags=%#x)",
3744 		    map, start, end, flags);
3745 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3746 		(PGO_FREE|PGO_DEACTIVATE));
3747 
3748 	vm_map_lock_read(map);
3749 	VM_MAP_RANGE_CHECK(map, start, end);
3750 	if (uvm_map_lookup_entry(map, start, &entry) == false) {
3751 		vm_map_unlock_read(map);
3752 		return EFAULT;
3753 	}
3754 
3755 	/*
3756 	 * Make a first pass to check for holes and wiring problems.
3757 	 */
3758 
3759 	for (current = entry; current->start < end; current = current->next) {
3760 		if (UVM_ET_ISSUBMAP(current)) {
3761 			vm_map_unlock_read(map);
3762 			return EINVAL;
3763 		}
3764 		if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(current)) {
3765 			vm_map_unlock_read(map);
3766 			return EBUSY;
3767 		}
3768 		if (end <= current->end) {
3769 			break;
3770 		}
3771 		if (current->end != current->next->start) {
3772 			vm_map_unlock_read(map);
3773 			return EFAULT;
3774 		}
3775 	}
3776 
3777 	error = 0;
3778 	for (current = entry; start < end; current = current->next) {
3779 		amap = current->aref.ar_amap;	/* upper layer */
3780 		uobj = current->object.uvm_obj;	/* lower layer */
3781 		KASSERT(start >= current->start);
3782 
3783 		/*
3784 		 * No amap cleaning necessary if:
3785 		 *
3786 		 *	(1) There's no amap.
3787 		 *
3788 		 *	(2) We're not deactivating or freeing pages.
3789 		 */
3790 
3791 		if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
3792 			goto flush_object;
3793 
3794 		offset = start - current->start;
3795 		size = MIN(end, current->end) - start;
3796 		anon_tofree = NULL;
3797 
3798 		amap_lock(amap);
3799 		for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
3800 			anon = amap_lookup(&current->aref, offset);
3801 			if (anon == NULL)
3802 				continue;
3803 
3804 			KASSERT(anon->an_lock == amap->am_lock);
3805 			pg = anon->an_page;
3806 			if (pg == NULL) {
3807 				continue;
3808 			}
3809 			if (pg->flags & PG_BUSY) {
3810 				continue;
3811 			}
3812 
3813 			switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
3814 
3815 			/*
3816 			 * In these first 3 cases, we just deactivate the page.
3817 			 */
3818 
3819 			case PGO_CLEANIT|PGO_FREE:
3820 			case PGO_CLEANIT|PGO_DEACTIVATE:
3821 			case PGO_DEACTIVATE:
3822  deactivate_it:
3823 				/*
3824 				 * skip the page if it's loaned or wired,
3825 				 * since it shouldn't be on a paging queue
3826 				 * at all in these cases.
3827 				 */
3828 
3829 				mutex_enter(&uvm_pageqlock);
3830 				if (pg->loan_count != 0 ||
3831 				    pg->wire_count != 0) {
3832 					mutex_exit(&uvm_pageqlock);
3833 					continue;
3834 				}
3835 				KASSERT(pg->uanon == anon);
3836 				uvm_pagedeactivate(pg);
3837 				mutex_exit(&uvm_pageqlock);
3838 				continue;
3839 
3840 			case PGO_FREE:
3841 
3842 				/*
3843 				 * If there are multiple references to
3844 				 * the amap, just deactivate the page.
3845 				 */
3846 
3847 				if (amap_refs(amap) > 1)
3848 					goto deactivate_it;
3849 
3850 				/* skip the page if it's wired */
3851 				if (pg->wire_count != 0) {
3852 					continue;
3853 				}
3854 				amap_unadd(&current->aref, offset);
3855 				refs = --anon->an_ref;
3856 				if (refs == 0) {
3857 					anon->an_link = anon_tofree;
3858 					anon_tofree = anon;
3859 				}
3860 				continue;
3861 			}
3862 		}
3863 		uvm_anon_freelst(amap, anon_tofree);
3864 
3865  flush_object:
3866 		/*
3867 		 * flush pages if we've got a valid backing object.
3868 		 * note that we must always clean object pages before
3869 		 * freeing them since otherwise we could reveal stale
3870 		 * data from files.
3871 		 */
3872 
3873 		uoff = current->offset + (start - current->start);
3874 		size = MIN(end, current->end) - start;
3875 		if (uobj != NULL) {
3876 			mutex_enter(uobj->vmobjlock);
3877 			if (uobj->pgops->pgo_put != NULL)
3878 				error = (uobj->pgops->pgo_put)(uobj, uoff,
3879 				    uoff + size, flags | PGO_CLEANIT);
3880 			else
3881 				error = 0;
3882 		}
3883 		start += size;
3884 	}
3885 	vm_map_unlock_read(map);
3886 	return (error);
3887 }
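
/*
 * Example (illustrative sketch, disabled): a minimal msync(2)-style use of
 * uvm_map_clean().  The name and arguments are hypothetical and assumed to
 * be page aligned; the real entry points are sys___msync13() and
 * sys_madvise().
 */
#if 0
static int
example_msync(struct vm_map *map, vaddr_t va, vsize_t len)
{

	/* write dirty pages synchronously, then deactivate the clean ones */
	return uvm_map_clean(map, va, va + len,
	    PGO_CLEANIT | PGO_SYNCIO | PGO_DEACTIVATE);
}
#endif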
3888 
3889 
3890 /*
3891  * uvm_map_checkprot: check protection in map
3892  *
3893  * => checks that the whole range is allocated and allows "protection".
3894  * => map must be read or write locked by caller.
3895  */
3896 
3897 bool
3898 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3899     vm_prot_t protection)
3900 {
3901 	struct vm_map_entry *entry;
3902 	struct vm_map_entry *tmp_entry;
3903 
3904 	if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3905 		return (false);
3906 	}
3907 	entry = tmp_entry;
3908 	while (start < end) {
3909 		if (entry == &map->header) {
3910 			return (false);
3911 		}
3912 
3913 		/*
3914 		 * no holes allowed
3915 		 */
3916 
3917 		if (start < entry->start) {
3918 			return (false);
3919 		}
3920 
3921 		/*
3922 		 * check protection associated with entry
3923 		 */
3924 
3925 		if ((entry->protection & protection) != protection) {
3926 			return (false);
3927 		}
3928 		start = entry->end;
3929 		entry = entry->next;
3930 	}
3931 	return (true);
3932 }
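
/*
 * Example (illustrative sketch, disabled): a typical uvm_map_checkprot()
 * caller verifies a whole range before operating on it, holding at least a
 * read lock as required.  Identifiers here are hypothetical.
 */
#if 0
static bool
example_range_is_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);
	ok = uvm_map_checkprot(map, start, end, VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok;
}
#endif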
3933 
3934 /*
3935  * uvmspace_alloc: allocate a vmspace structure.
3936  *
3937  * - structure includes vm_map and pmap
3938  * - XXX: no locking on this structure
3939  * - refcnt set to 1, rest must be init'd by caller
3940  */
3941 struct vmspace *
3942 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
3943 {
3944 	struct vmspace *vm;
3945 	UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3946 
3947 	vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
3948 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
3949 	UVMHIST_LOG(maphist,"<- done (vm=%p)", vm,0,0,0);
3950 	return (vm);
3951 }
3952 
3953 /*
3954  * uvmspace_init: initialize a vmspace structure.
3955  *
3956  * - XXX: no locking on this structure
3957  * - refcnt set to 1, rest must be init'd by caller
3958  */
3959 void
3960 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
3961     vaddr_t vmax, bool topdown)
3962 {
3963 	UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3964 
3965 	UVMHIST_LOG(maphist, "(vm=%p, pmap=%p, vmin=%#lx, vmax=%#lx",
3966 	    vm, pmap, vmin, vmax);
3967 	UVMHIST_LOG(maphist, "   topdown=%u)", topdown, 0, 0, 0);
3968 
3969 	memset(vm, 0, sizeof(*vm));
3970 	uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
3971 	    | (topdown ? VM_MAP_TOPDOWN : 0)
3972 	    );
3973 	if (pmap)
3974 		pmap_reference(pmap);
3975 	else
3976 		pmap = pmap_create();
3977 	vm->vm_map.pmap = pmap;
3978 	vm->vm_refcnt = 1;
3979 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3980 }
3981 
3982 /*
3983  * uvmspace_share: share a vmspace between two processes
3984  *
3985  * - used for vfork, threads(?)
3986  */
3987 
3988 void
3989 uvmspace_share(struct proc *p1, struct proc *p2)
3990 {
3991 
3992 	uvmspace_addref(p1->p_vmspace);
3993 	p2->p_vmspace = p1->p_vmspace;
3994 }
3995 
3996 #if 0
3997 
3998 /*
3999  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
4000  *
4001  * - XXX: no locking on vmspace
4002  */
4003 
4004 void
4005 uvmspace_unshare(struct lwp *l)
4006 {
4007 	struct proc *p = l->l_proc;
4008 	struct vmspace *nvm, *ovm = p->p_vmspace;
4009 
4010 	if (ovm->vm_refcnt == 1)
4011 		/* nothing to do: vmspace isn't shared in the first place */
4012 		return;
4013 
4014 	/* make a new vmspace, still holding old one */
4015 	nvm = uvmspace_fork(ovm);
4016 
4017 	kpreempt_disable();
4018 	pmap_deactivate(l);		/* unbind old vmspace */
4019 	p->p_vmspace = nvm;
4020 	pmap_activate(l);		/* switch to new vmspace */
4021 	kpreempt_enable();
4022 
4023 	uvmspace_free(ovm);		/* drop reference to old vmspace */
4024 }
4025 
4026 #endif
4027 
4028 
4029 /*
4030  * uvmspace_spawn: a new process has been spawned and needs a vmspace
4031  */
4032 
4033 void
4034 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4035 {
4036 	struct proc *p = l->l_proc;
4037 	struct vmspace *nvm;
4038 
4039 #ifdef __HAVE_CPU_VMSPACE_EXEC
4040 	cpu_vmspace_exec(l, start, end);
4041 #endif
4042 
4043 	nvm = uvmspace_alloc(start, end, topdown);
4044 	kpreempt_disable();
4045 	p->p_vmspace = nvm;
4046 	pmap_activate(l);
4047 	kpreempt_enable();
4048 }
4049 
4050 /*
4051  * uvmspace_exec: the process wants to exec a new program
4052  */
4053 
4054 void
4055 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
4056 {
4057 	struct proc *p = l->l_proc;
4058 	struct vmspace *nvm, *ovm = p->p_vmspace;
4059 	struct vm_map *map;
4060 
4061 	KASSERT(ovm != NULL);
4062 #ifdef __HAVE_CPU_VMSPACE_EXEC
4063 	cpu_vmspace_exec(l, start, end);
4064 #endif
4065 
4066 	map = &ovm->vm_map;
4067 	/*
4068 	 * see if more than one process is using this vmspace...
4069 	 */
4070 
4071 	if (ovm->vm_refcnt == 1
4072 	    && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
4073 
4074 		/*
4075 		 * if p is the only process using its vmspace then we can safely
4076 		 * recycle that vmspace for the program that is being exec'd.
4077 		 * But only if TOPDOWN matches the requested value for the new
4078 		 * vm space!
4079 		 */
4080 
4081 		/*
4082 		 * SYSV SHM semantics require us to kill all segments on an exec
4083 		 */
4084 		if (uvm_shmexit && ovm->vm_shm)
4085 			(*uvm_shmexit)(ovm);
4086 
4087 		/*
4088 		 * POSIX 1003.1b -- "lock future mappings" is revoked
4089 		 * when a process execs another program image.
4090 		 */
4091 
4092 		map->flags &= ~VM_MAP_WIREFUTURE;
4093 
4094 		/*
4095 		 * now unmap the old program
4096 		 */
4097 
4098 		pmap_remove_all(map->pmap);
4099 		uvm_unmap(map, vm_map_min(map), vm_map_max(map));
4100 		KASSERT(map->header.prev == &map->header);
4101 		KASSERT(map->nentries == 0);
4102 
4103 		/*
4104 		 * resize the map
4105 		 */
4106 
4107 		vm_map_setmin(map, start);
4108 		vm_map_setmax(map, end);
4109 	} else {
4110 
4111 		/*
4112 		 * p's vmspace is being shared, so we can't reuse it for p since
4113 		 * it is still being used for others.   allocate a new vmspace
4114 		 * for p
4115 		 */
4116 
4117 		nvm = uvmspace_alloc(start, end, topdown);
4118 
4119 		/*
4120 		 * install new vmspace and drop our ref to the old one.
4121 		 */
4122 
4123 		kpreempt_disable();
4124 		pmap_deactivate(l);
4125 		p->p_vmspace = nvm;
4126 		pmap_activate(l);
4127 		kpreempt_enable();
4128 
4129 		uvmspace_free(ovm);
4130 	}
4131 }
4132 
4133 /*
4134  * uvmspace_addref: add a reference to a vmspace.
4135  */
4136 
4137 void
4138 uvmspace_addref(struct vmspace *vm)
4139 {
4140 	struct vm_map *map = &vm->vm_map;
4141 
4142 	KASSERT((map->flags & VM_MAP_DYING) == 0);
4143 
4144 	mutex_enter(&map->misc_lock);
4145 	KASSERT(vm->vm_refcnt > 0);
4146 	vm->vm_refcnt++;
4147 	mutex_exit(&map->misc_lock);
4148 }
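
/*
 * Example (illustrative sketch, disabled): the usual reference pattern -
 * take a reference before sleeping or unlocking so the vmspace cannot be
 * torn down underneath us, then release it with uvmspace_free(), which
 * only destroys the map and pmap when the last reference goes away.
 */
#if 0
static void
example_vmspace_use(struct vmspace *vm)
{

	uvmspace_addref(vm);
	/* ... safely use vm->vm_map across a blocking operation ... */
	uvmspace_free(vm);
}
#endif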
4149 
4150 /*
4151  * uvmspace_free: free a vmspace data structure
4152  */
4153 
4154 void
4155 uvmspace_free(struct vmspace *vm)
4156 {
4157 	struct vm_map_entry *dead_entries;
4158 	struct vm_map *map = &vm->vm_map;
4159 	int n;
4160 
4161 	UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
4162 
4163 	UVMHIST_LOG(maphist,"(vm=%p) ref=%d", vm, vm->vm_refcnt,0,0);
4164 	mutex_enter(&map->misc_lock);
4165 	n = --vm->vm_refcnt;
4166 	mutex_exit(&map->misc_lock);
4167 	if (n > 0)
4168 		return;
4169 
4170 	/*
4171 	 * at this point, there should be no other references to the map.
4172 	 * delete all of the mappings, then destroy the pmap.
4173 	 */
4174 
4175 	map->flags |= VM_MAP_DYING;
4176 	pmap_remove_all(map->pmap);
4177 
4178 	/* Get rid of any SYSV shared memory segments. */
4179 	if (uvm_shmexit && vm->vm_shm != NULL)
4180 		(*uvm_shmexit)(vm);
4181 
4182 	if (map->nentries) {
4183 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
4184 		    &dead_entries, 0);
4185 		if (dead_entries != NULL)
4186 			uvm_unmap_detach(dead_entries, 0);
4187 	}
4188 	KASSERT(map->nentries == 0);
4189 	KASSERT(map->size == 0);
4190 
4191 	mutex_destroy(&map->misc_lock);
4192 	rw_destroy(&map->lock);
4193 	cv_destroy(&map->cv);
4194 	pmap_destroy(map->pmap);
4195 	pool_cache_put(&uvm_vmspace_cache, vm);
4196 }
4197 
4198 static struct vm_map_entry *
4199 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4200     int flags)
4201 {
4202 	struct vm_map_entry *new_entry;
4203 
4204 	new_entry = uvm_mapent_alloc(new_map, 0);
4205 	/* old_entry -> new_entry */
4206 	uvm_mapent_copy(old_entry, new_entry);
4207 
4208 	/* new pmap has nothing wired in it */
4209 	new_entry->wired_count = 0;
4210 
4211 	/*
4212 	 * gain a reference to the amap and object backing the mapping
4213 	 * (can't be a submap; that case was already checked).
4214 	 */
4215 
4216 	if (new_entry->aref.ar_amap)
4217 		uvm_map_reference_amap(new_entry, flags);
4218 
4219 	if (new_entry->object.uvm_obj &&
4220 	    new_entry->object.uvm_obj->pgops->pgo_reference)
4221 		new_entry->object.uvm_obj->pgops->pgo_reference(
4222 			new_entry->object.uvm_obj);
4223 
4224 	/* insert entry at end of new_map's entry list */
4225 	uvm_map_entry_link(new_map, new_map->header.prev,
4226 	    new_entry);
4227 
4228 	return new_entry;
4229 }
4230 
4231 /*
4232  * share the mapping: this means we want the old and
4233  * new entries to share amaps and backing objects.
4234  */
4235 static void
4236 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
4237     struct vm_map_entry *old_entry)
4238 {
4239 	/*
4240 	 * if the old_entry needs a new amap (due to prev fork)
4241 	 * then we need to allocate it now so that we have
4242 	 * something we own to share with the new_entry.   [in
4243 	 * other words, we need to clear needs_copy]
4244 	 */
4245 
4246 	if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4247 		/* get our own amap, clears needs_copy */
4248 		amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4249 		    0, 0);
4250 		/* XXXCDC: WAITOK??? */
4251 	}
4252 
4253 	uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
4254 }
4255 
4256 
4257 static void
4258 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
4259     struct vm_map_entry *old_entry)
4260 {
4261 	struct vm_map_entry *new_entry;
4262 
4263 	/*
4264 	 * copy-on-write the mapping (using mmap's
4265 	 * MAP_PRIVATE semantics)
4266 	 *
4267 	 * allocate new_entry, adjust reference counts.
4268 	 * (note that new references are read-only).
4269 	 */
4270 
4271 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4272 
4273 	new_entry->etype |=
4274 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4275 
4276 	/*
4277 	 * the new entry will need an amap.  it will either
4278 	 * need to be copied from the old entry or created
4279 	 * from scratch (if the old entry does not have an
4280 	 * amap).  can we defer this process until later
4281 	 * (by setting "needs_copy") or do we need to copy
4282 	 * the amap now?
4283 	 *
4284 	 * we must copy the amap now if any of the following
4285 	 * conditions hold:
4286 	 * 1. the old entry has an amap and that amap is
4287 	 *    being shared.  this means that the old (parent)
4288 	 *    process is sharing the amap with another
4289 	 *    process.  if we do not clear needs_copy here
4290 	 *    we will end up in a situation where both the
4291 	 *    parent and child process are referring to the
4292 	 *    same amap with "needs_copy" set.  if the
4293 	 *    parent write-faults, the fault routine will
4294 	 *    clear "needs_copy" in the parent by allocating
4295 	 *    a new amap.   this is wrong because the
4296 	 *    parent is supposed to be sharing the old amap
4297 	 *    and the new amap will break that.
4298 	 *
4299 	 * 2. if the old entry has an amap and a non-zero
4300 	 *    wire count then we are going to have to call
4301 	 *    amap_cow_now to avoid page faults in the
4302 	 *    parent process.   since amap_cow_now requires
4303 	 *    "needs_copy" to be clear we might as well
4304 	 *    clear it here as well.
4305 	 *
4306 	 */
4307 
4308 	if (old_entry->aref.ar_amap != NULL) {
4309 		if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4310 		    VM_MAPENT_ISWIRED(old_entry)) {
4311 
4312 			amap_copy(new_map, new_entry,
4313 			    AMAP_COPY_NOCHUNK, 0, 0);
4314 			/* XXXCDC: M_WAITOK ... ok? */
4315 		}
4316 	}
4317 
4318 	/*
4319 	 * if the parent's entry is wired down, then the
4320 	 * parent process does not want page faults on
4321 	 * access to that memory.  this means that we
4322 	 * cannot do copy-on-write because we can't write
4323 	 * protect the old entry.   in this case we
4324 	 * resolve all copy-on-write faults now, using
4325 	 * amap_cow_now.   note that we have already
4326 	 * allocated any needed amap (above).
4327 	 */
4328 
4329 	if (VM_MAPENT_ISWIRED(old_entry)) {
4330 
4331 		/*
4332 		 * resolve all copy-on-write faults now
4333 		 * (note that there is nothing to do if
4334 		 * the old mapping does not have an amap).
4335 		 */
4336 		if (old_entry->aref.ar_amap)
4337 			amap_cow_now(new_map, new_entry);
4338 
4339 	} else {
4340 		/*
4341 		 * setup mappings to trigger copy-on-write faults
4342 		 * we must write-protect the parent if it has
4343 		 * an amap and it is not already "needs_copy"...
4344 		 * if it is already "needs_copy" then the parent
4345 		 * has already been write-protected by a previous
4346 		 * fork operation.
4347 		 */
4348 		if (old_entry->aref.ar_amap &&
4349 		    !UVM_ET_ISNEEDSCOPY(old_entry)) {
4350 			if (old_entry->max_protection & VM_PROT_WRITE) {
4351 				pmap_protect(old_map->pmap,
4352 				    old_entry->start, old_entry->end,
4353 				    old_entry->protection & ~VM_PROT_WRITE);
4354 			}
4355 			old_entry->etype |= UVM_ET_NEEDSCOPY;
4356 		}
4357 	}
4358 }
4359 
4360 /*
4361  * zero the mapping: the new entry will be zero initialized
4362  */
4363 static void
4364 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
4365     struct vm_map_entry *old_entry)
4366 {
4367 	struct vm_map_entry *new_entry;
4368 
4369 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4370 
4371 	new_entry->etype |=
4372 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4373 
4374 	if (new_entry->aref.ar_amap) {
4375 		uvm_map_unreference_amap(new_entry, 0);
4376 		new_entry->aref.ar_pageoff = 0;
4377 		new_entry->aref.ar_amap = NULL;
4378 	}
4379 
4380 	if (UVM_ET_ISOBJ(new_entry)) {
4381 		if (new_entry->object.uvm_obj->pgops->pgo_detach)
4382 			new_entry->object.uvm_obj->pgops->pgo_detach(
4383 			    new_entry->object.uvm_obj);
4384 		new_entry->object.uvm_obj = NULL;
4385 		new_entry->etype &= ~UVM_ET_OBJ;
4386 	}
4387 }
4388 
4389 /*
4390  *   F O R K   -   m a i n   e n t r y   p o i n t
4391  */
4392 /*
4393  * uvmspace_fork: fork a process' main map
4394  *
4395  * => create a new vmspace for child process from parent.
4396  * => parent's map must not be locked.
4397  */
4398 
4399 struct vmspace *
4400 uvmspace_fork(struct vmspace *vm1)
4401 {
4402 	struct vmspace *vm2;
4403 	struct vm_map *old_map = &vm1->vm_map;
4404 	struct vm_map *new_map;
4405 	struct vm_map_entry *old_entry;
4406 	UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
4407 
4408 	vm_map_lock(old_map);
4409 
4410 	vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4411 	    vm1->vm_map.flags & VM_MAP_TOPDOWN);
4412 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
4413 	    (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
4414 	new_map = &vm2->vm_map;		  /* XXX */
4415 
4416 	old_entry = old_map->header.next;
4417 	new_map->size = old_map->size;
4418 
4419 	/*
4420 	 * go entry-by-entry
4421 	 */
4422 
4423 	while (old_entry != &old_map->header) {
4424 
4425 		/*
4426 		 * first, some sanity checks on the old entry
4427 		 */
4428 
4429 		KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4430 		KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4431 			!UVM_ET_ISNEEDSCOPY(old_entry));
4432 
4433 		switch (old_entry->inheritance) {
4434 		case MAP_INHERIT_NONE:
4435 			/*
4436 			 * drop the mapping, modify size
4437 			 */
4438 			new_map->size -= old_entry->end - old_entry->start;
4439 			break;
4440 
4441 		case MAP_INHERIT_SHARE:
4442 			uvm_mapent_forkshared(new_map, old_map, old_entry);
4443 			break;
4444 
4445 		case MAP_INHERIT_COPY:
4446 			uvm_mapent_forkcopy(new_map, old_map, old_entry);
4447 			break;
4448 
4449 		case MAP_INHERIT_ZERO:
4450 			uvm_mapent_forkzero(new_map, old_map, old_entry);
4451 			break;
4452 		default:
4453 			KASSERT(0);
4454 			break;
4455 		}
4456 		old_entry = old_entry->next;
4457 	}
4458 
4459 	pmap_update(old_map->pmap);
4460 	vm_map_unlock(old_map);
4461 
4462 	if (uvm_shmfork && vm1->vm_shm)
4463 		(*uvm_shmfork)(vm1, vm2);
4464 
4465 #ifdef PMAP_FORK
4466 	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
4467 #endif
4468 
4469 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
4470 	return (vm2);
4471 }
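
/*
 * Example (illustrative sketch, disabled): the inheritance value drives
 * which fork helper runs above.  A caller could, for instance, mark a
 * region MAP_INHERIT_SHARE with uvm_map_inherit() so that
 * uvm_mapent_forkshared() is used for it at fork time.  The function name
 * here is hypothetical.
 */
#if 0
static int
example_share_on_fork(struct vm_map *map, vaddr_t start, vaddr_t end)
{

	return uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
}
#endif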
4472 
4473 
4474 /*
4475  * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4476  *
4477  * => called with map locked.
4478  * => returns non-zero if successfully merged.
4479  */
4480 
4481 int
4482 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4483 {
4484 	struct uvm_object *uobj;
4485 	struct vm_map_entry *next;
4486 	struct vm_map_entry *prev;
4487 	vsize_t size;
4488 	int merged = 0;
4489 	bool copying;
4490 	int newetype;
4491 
4492 	if (entry->aref.ar_amap != NULL) {
4493 		return 0;
4494 	}
4495 	if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4496 		return 0;
4497 	}
4498 
4499 	uobj = entry->object.uvm_obj;
4500 	size = entry->end - entry->start;
4501 	copying = (flags & UVM_MERGE_COPYING) != 0;
4502 	newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4503 
4504 	next = entry->next;
4505 	if (next != &map->header &&
4506 	    next->start == entry->end &&
4507 	    ((copying && next->aref.ar_amap != NULL &&
4508 	    amap_refs(next->aref.ar_amap) == 1) ||
4509 	    (!copying && next->aref.ar_amap == NULL)) &&
4510 	    UVM_ET_ISCOMPATIBLE(next, newetype,
4511 	    uobj, entry->flags, entry->protection,
4512 	    entry->max_protection, entry->inheritance, entry->advice,
4513 	    entry->wired_count) &&
4514 	    (uobj == NULL || entry->offset + size == next->offset)) {
4515 		int error;
4516 
4517 		if (copying) {
4518 			error = amap_extend(next, size,
4519 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4520 		} else {
4521 			error = 0;
4522 		}
4523 		if (error == 0) {
4524 			if (uobj) {
4525 				if (uobj->pgops->pgo_detach) {
4526 					uobj->pgops->pgo_detach(uobj);
4527 				}
4528 			}
4529 
4530 			entry->end = next->end;
4531 			clear_hints(map, next);
4532 			uvm_map_entry_unlink(map, next);
4533 			if (copying) {
4534 				entry->aref = next->aref;
4535 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4536 			}
4537 			uvm_map_check(map, "trymerge forwardmerge");
4538 			uvm_mapent_free(next);
4539 			merged++;
4540 		}
4541 	}
4542 
4543 	prev = entry->prev;
4544 	if (prev != &map->header &&
4545 	    prev->end == entry->start &&
4546 	    ((copying && !merged && prev->aref.ar_amap != NULL &&
4547 	    amap_refs(prev->aref.ar_amap) == 1) ||
4548 	    (!copying && prev->aref.ar_amap == NULL)) &&
4549 	    UVM_ET_ISCOMPATIBLE(prev, newetype,
4550 	    uobj, entry->flags, entry->protection,
4551 	    entry->max_protection, entry->inheritance, entry->advice,
4552 	    entry->wired_count) &&
4553 	    (uobj == NULL ||
4554 	    prev->offset + prev->end - prev->start == entry->offset)) {
4555 		int error;
4556 
4557 		if (copying) {
4558 			error = amap_extend(prev, size,
4559 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4560 		} else {
4561 			error = 0;
4562 		}
4563 		if (error == 0) {
4564 			if (uobj) {
4565 				if (uobj->pgops->pgo_detach) {
4566 					uobj->pgops->pgo_detach(uobj);
4567 				}
4568 				entry->offset = prev->offset;
4569 			}
4570 
4571 			entry->start = prev->start;
4572 			clear_hints(map, prev);
4573 			uvm_map_entry_unlink(map, prev);
4574 			if (copying) {
4575 				entry->aref = prev->aref;
4576 				entry->etype &= ~UVM_ET_NEEDSCOPY;
4577 			}
4578 			uvm_map_check(map, "trymerge backmerge");
4579 			uvm_mapent_free(prev);
4580 			merged++;
4581 		}
4582 	}
4583 
4584 	return merged;
4585 }
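
/*
 * Example (illustrative sketch, disabled): uvm_mapent_trymerge() is an
 * opportunistic optimization, so callers hold the map write locked and may
 * simply ignore the result.  "entry" is assumed to belong to "map";
 * identifiers are hypothetical.
 */
#if 0
static void
example_trymerge(struct vm_map *map, struct vm_map_entry *entry)
{

	vm_map_lock(map);
	(void)uvm_mapent_trymerge(map, entry, 0);
	vm_map_unlock(map);
}
#endif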
4586 
4587 /*
4588  * uvm_map_setup: init map
4589  *
4590  * => map must not be in service yet.
4591  */
4592 
4593 void
4594 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
4595 {
4596 
4597 	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
4598 	map->header.next = map->header.prev = &map->header;
4599 	map->nentries = 0;
4600 	map->size = 0;
4601 	map->ref_count = 1;
4602 	vm_map_setmin(map, vmin);
4603 	vm_map_setmax(map, vmax);
4604 	map->flags = flags;
4605 	map->first_free = &map->header;
4606 	map->hint = &map->header;
4607 	map->timestamp = 0;
4608 	map->busy = NULL;
4609 
4610 	rw_init(&map->lock);
4611 	cv_init(&map->cv, "vm_map");
4612 	mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
4613 }
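
/*
 * Example (illustrative sketch, disabled): uvm_map_setup() initializes
 * the map structure only; the caller attaches the pmap afterwards, as
 * uvmspace_init() does above.  Identifiers are hypothetical.
 */
#if 0
static void
example_map_init(struct vm_map *map, struct pmap *pmap,
    vaddr_t vmin, vaddr_t vmax)
{

	uvm_map_setup(map, vmin, vmax, VM_MAP_PAGEABLE);
	map->pmap = pmap;
}
#endif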
4614 
4615 /*
4616  *   U N M A P   -   m a i n   e n t r y   p o i n t
4617  */
4618 
4619 /*
4620  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
4621  *
4622  * => caller must check alignment and size
4623  * => map must be unlocked (we will lock it)
4624  * => flags is UVM_FLAG_QUANTUM or 0.
4625  */
4626 
4627 void
4628 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4629 {
4630 	struct vm_map_entry *dead_entries;
4631 	UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
4632 
4633 	UVMHIST_LOG(maphist, "  (map=%p, start=%#lx, end=%#lx)",
4634 	    map, start, end, 0);
4635 	if (map == kernel_map) {
4636 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4637 	}
4638 	/*
4639 	 * the work is done by helper functions: wipe the pmap mappings and
4640 	 * then detach from the dead entries...
4641 	 */
4642 	vm_map_lock(map);
4643 	uvm_unmap_remove(map, start, end, &dead_entries, flags);
4644 	vm_map_unlock(map);
4645 
4646 	if (dead_entries != NULL)
4647 		uvm_unmap_detach(dead_entries, 0);
4648 
4649 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4650 }
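
/*
 * Example (illustrative sketch, disabled): most callers use the
 * uvm_unmap() wrapper from uvm_map.h, which is uvm_unmap1() with flags of
 * 0 (as seen in uvmspace_exec() above).  "va" and "size" are hypothetical
 * and assumed page aligned.
 */
#if 0
static void
example_unmap(struct vm_map *map, vaddr_t va, vsize_t size)
{

	uvm_unmap(map, va, va + size);
}
#endif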
4651 
4652 
4653 /*
4654  * uvm_map_reference: add reference to a map
4655  *
4656  * => map need not be locked (we use misc_lock).
4657  */
4658 
4659 void
4660 uvm_map_reference(struct vm_map *map)
4661 {
4662 	mutex_enter(&map->misc_lock);
4663 	map->ref_count++;
4664 	mutex_exit(&map->misc_lock);
4665 }
4666 
4667 bool
4668 vm_map_starved_p(struct vm_map *map)
4669 {
4670 
4671 	if ((map->flags & VM_MAP_WANTVA) != 0) {
4672 		return true;
4673 	}
4674 	/* XXX: heuristic - starved once >15/16 of the VA range is in use */
4675 	if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
4676 		return true;
4677 	}
4678 	return false;
4679 }
4680 
4681 void
4682 uvm_map_lock_entry(struct vm_map_entry *entry)
4683 {
4684 
4685 	if (entry->aref.ar_amap != NULL) {
4686 		amap_lock(entry->aref.ar_amap);
4687 	}
4688 	if (UVM_ET_ISOBJ(entry)) {
4689 		mutex_enter(entry->object.uvm_obj->vmobjlock);
4690 	}
4691 }
4692 
4693 void
4694 uvm_map_unlock_entry(struct vm_map_entry *entry)
4695 {
4696 
4697 	if (UVM_ET_ISOBJ(entry)) {
4698 		mutex_exit(entry->object.uvm_obj->vmobjlock);
4699 	}
4700 	if (entry->aref.ar_amap != NULL) {
4701 		amap_unlock(entry->aref.ar_amap);
4702 	}
4703 }
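
/*
 * Example (illustrative sketch, disabled): the entry lock helpers above
 * impose the amap-then-object lock order and release in reverse; callers
 * bracket page-level work on the entry with them.  The function name is
 * hypothetical.
 */
#if 0
static void
example_entry_op(struct vm_map_entry *entry)
{

	uvm_map_lock_entry(entry);
	/* ... examine or update the entry's resident pages ... */
	uvm_map_unlock_entry(entry);
}
#endif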
4704 
4705 #if defined(DDB) || defined(DEBUGPRINT)
4706 
4707 /*
4708  * uvm_map_printit: actually prints the map
4709  */
4710 
4711 void
4712 uvm_map_printit(struct vm_map *map, bool full,
4713     void (*pr)(const char *, ...))
4714 {
4715 	struct vm_map_entry *entry;
4716 
4717 	(*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
4718 	    vm_map_max(map));
4719 	(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
4720 	    map->nentries, map->size, map->ref_count, map->timestamp,
4721 	    map->flags);
4722 	(*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4723 	    pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
4724 	if (!full)
4725 		return;
4726 	for (entry = map->header.next; entry != &map->header;
4727 	    entry = entry->next) {
4728 		(*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
4729 		    entry, entry->start, entry->end, entry->object.uvm_obj,
4730 		    (long long)entry->offset, entry->aref.ar_amap,
4731 		    entry->aref.ar_pageoff);
4732 		(*pr)(
4733 		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4734 		    "wc=%d, adv=%d\n",
4735 		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
4736 		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
4737 		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4738 		    entry->protection, entry->max_protection,
4739 		    entry->inheritance, entry->wired_count, entry->advice);
4740 	}
4741 }
4742 
4743 void
4744 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
4745 {
4746 	struct vm_map *map;
4747 
4748 	for (map = kernel_map;;) {
4749 		struct vm_map_entry *entry;
4750 
4751 		if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
4752 			break;
4753 		}
4754 		(*pr)("%p is %p+%zu from VMMAP %p\n",
4755 		    (void *)addr, (void *)entry->start,
4756 		    (size_t)(addr - (uintptr_t)entry->start), map);
4757 		if (!UVM_ET_ISSUBMAP(entry)) {
4758 			break;
4759 		}
4760 		map = entry->object.sub_map;
4761 	}
4762 }
4763 
4764 #endif /* DDB || DEBUGPRINT */
4765 
4766 #ifndef __USER_VA0_IS_SAFE
4767 static int
4768 sysctl_user_va0_disable(SYSCTLFN_ARGS)
4769 {
4770 	struct sysctlnode node;
4771 	int t, error;
4772 
4773 	node = *rnode;
4774 	node.sysctl_data = &t;
4775 	t = user_va0_disable;
4776 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
4777 	if (error || newp == NULL)
4778 		return (error);
4779 
4780 	if (!t && user_va0_disable &&
4781 	    kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
4782 	    NULL, NULL, NULL))
4783 		return EPERM;
4784 
4785 	user_va0_disable = !!t;
4786 	return 0;
4787 }
4788 #endif
4789 
4790 static int
4791 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
4792     struct vm_map *m, struct vm_map_entry *e)
4793 {
4794 #ifndef _RUMPKERNEL
4795 	int error;
4796 
4797 	memset(kve, 0, sizeof(*kve));
4798 	KASSERT(e != NULL);
4799 	if (UVM_ET_ISOBJ(e)) {
4800 		struct uvm_object *uobj = e->object.uvm_obj;
4801 		KASSERT(uobj != NULL);
4802 		kve->kve_ref_count = uobj->uo_refs;
4803 		kve->kve_count = uobj->uo_npages;
4804 		if (UVM_OBJ_IS_VNODE(uobj)) {
4805 			struct vattr va;
4806 			struct vnode *vp = (struct vnode *)uobj;
4807 			vn_lock(vp, LK_SHARED | LK_RETRY);
4808 			error = VOP_GETATTR(vp, &va, l->l_cred);
4809 			VOP_UNLOCK(vp);
4810 			kve->kve_type = KVME_TYPE_VNODE;
4811 			if (error == 0) {
4812 				kve->kve_vn_size = vp->v_size;
4813 				kve->kve_vn_type = (int)vp->v_type;
4814 				kve->kve_vn_mode = va.va_mode;
4815 				kve->kve_vn_rdev = va.va_rdev;
4816 				kve->kve_vn_fileid = va.va_fileid;
4817 				kve->kve_vn_fsid = va.va_fsid;
4818 				error = vnode_to_path(kve->kve_path,
4819 				    sizeof(kve->kve_path) / 2, vp, l, p);
4820 #ifdef DIAGNOSTIC
4821 				if (error)
4822 					printf("%s: vp %p error %d\n", __func__,
4823 						vp, error);
4824 #endif
4825 			}
4826 		} else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
4827 			kve->kve_type = KVME_TYPE_KERN;
4828 		} else if (UVM_OBJ_IS_DEVICE(uobj)) {
4829 			kve->kve_type = KVME_TYPE_DEVICE;
4830 		} else if (UVM_OBJ_IS_AOBJ(uobj)) {
4831 			kve->kve_type = KVME_TYPE_ANON;
4832 		} else {
4833 			kve->kve_type = KVME_TYPE_OBJECT;
4834 		}
4835 	} else if (UVM_ET_ISSUBMAP(e)) {
4836 		struct vm_map *map = e->object.sub_map;
4837 		KASSERT(map != NULL);
4838 		kve->kve_ref_count = map->ref_count;
4839 		kve->kve_count = map->nentries;
4840 		kve->kve_type = KVME_TYPE_SUBMAP;
4841 	} else
4842 		kve->kve_type = KVME_TYPE_UNKNOWN;
4843 
4844 	kve->kve_start = e->start;
4845 	kve->kve_end = e->end;
4846 	kve->kve_offset = e->offset;
4847 	kve->kve_wired_count = e->wired_count;
4848 	kve->kve_inheritance = e->inheritance;
4849 	kve->kve_attributes = e->map_attrib;
4850 	kve->kve_advice = e->advice;
4851 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
4852 	(((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
4853 	(((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
4854 	kve->kve_protection = PROT(e->protection);
4855 	kve->kve_max_protection = PROT(e->max_protection);
4856 	kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
4857 	    ? KVME_FLAG_COW : 0;
4858 	kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
4859 	    ? KVME_FLAG_NEEDS_COPY : 0;
4860 	kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
4861 	    ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
4862 	kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
4863 	    ? KVME_FLAG_PAGEABLE : 0;
4864 #endif
4865 	return 0;
4866 }
4867 
4868 static int
4869 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
4870     size_t *oldlenp)
4871 {
4872 	int error;
4873 	struct proc *p;
4874 	struct kinfo_vmentry vme;
4875 	struct vmspace *vm;
4876 	struct vm_map *map;
4877 	struct vm_map_entry *entry;
4878 	char *dp;
4879 	size_t count;
4880 
4881 	count = 0;
4882 
4883 	if ((error = proc_find_locked(l, &p, pid)) != 0)
4884 		return error;
4885 
4886 	if ((error = proc_vmspace_getref(p, &vm)) != 0)
4887 		goto out;
4888 
4889 	map = &vm->vm_map;
4890 	vm_map_lock_read(map);
4891 
4892 	dp = oldp;
4893 	for (entry = map->header.next; entry != &map->header;
4894 	    entry = entry->next) {
4895 		if (oldp && (dp - (char *)oldp) + elem_size <= *oldlenp) {
4896 			error = fill_vmentry(l, p, &vme, map, entry);
4897 			if (error)
4898 				break;
4899 			error = sysctl_copyout(l, &vme, dp,
4900 			    min(elem_size, sizeof(vme)));
4901 			if (error)
4902 				break;
4903 			dp += elem_size;
4904 		}
4905 		count++;
4906 	}
4907 	vm_map_unlock_read(map);
4908 	uvmspace_free(vm);
4909 out:
4910 	if (pid != -1)
4911 		mutex_exit(p->p_lock);
4912 	if (error == 0) {
4913 		count *= elem_size;
4914 		if (oldp != NULL && *oldlenp < count)
4915 			error = ENOSPC;
4916 		*oldlenp = count;
4917 	}
4918 	return error;
4919 }
4920 
4921 static int
4922 sysctl_vmproc(SYSCTLFN_ARGS)
4923 {
4924 	int error;
4925 
4926 	if (namelen == 1 && name[0] == CTL_QUERY)
4927 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
4928 
4929 	if (namelen == 0)
4930 		return EINVAL;
4931 
4932 	switch (name[0]) {
4933 	case VM_PROC_MAP:
4934 		if (namelen != 3)
4935 			return EINVAL;
4936 		sysctl_unlock();
4937 		error = fill_vmentries(l, name[1], name[2],
4938 		    oldp, oldlenp);
4939 		sysctl_relock();
4940 		return error;
4941 	default:
4942 		return EINVAL;
4943 	}
4944 }
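
/*
 * Example (illustrative sketch, disabled; userland side): sysctl_vmproc()
 * expects the mib { CTL_VM, VM_PROC, VM_PROC_MAP, pid, elem_size }, and a
 * first sysctl(3) call with a NULL buffer yields the required length.
 * The function name is hypothetical.
 */
#if 0
static int
example_query_vmmap(pid_t pid, void *buf, size_t *lenp)
{
	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, (int)pid,
	    sizeof(struct kinfo_vmentry) };

	return sysctl(mib, __arraycount(mib), buf, lenp, NULL, 0);
}
#endif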
4945 
4946 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
4947 {
4948 
4949 	sysctl_createv(clog, 0, NULL, NULL,
4950 		       CTLFLAG_PERMANENT,
4951 		       CTLTYPE_STRUCT, "proc",
4952 		       SYSCTL_DESCR("Process vm information"),
4953 		       sysctl_vmproc, 0, NULL, 0,
4954 		       CTL_VM, VM_PROC, CTL_EOL);
4955 #ifndef __USER_VA0_IS_SAFE
4956 	sysctl_createv(clog, 0, NULL, NULL,
4957 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4958 		       CTLTYPE_INT, "user_va0_disable",
4959 		       SYSCTL_DESCR("Disable VA 0"),
4960 		       sysctl_user_va0_disable, 0, &user_va0_disable, 0,
4961 		       CTL_VM, CTL_CREATE, CTL_EOL);
4962 #endif
4963 }
4964