/*	$OpenBSD: uvm_map.c,v 1.60 2003/06/29 17:31:12 avsm Exp $	*/
/*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define RB_AUGMENT(x) uvm_rb_augment(x)
#define UVM_MAP
#include <uvm/uvm.h>

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif

struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";

/*
 * pool for vmspace structures.
 */

struct pool uvm_vmspace_pool;

/*
 * pool for dynamically-allocated map entries.
 */

struct pool uvm_map_entry_pool;
struct pool uvm_map_entry_kmem_pool;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert(map, entry); \
} while (0)
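
/*
 * Illustrative note (not in the original source): entries live on a
 * circular doubly-linked list whose sentinel is &map->header, sorted by
 * start address, in addition to the red-black tree.  A minimal sketch
 * of a walk over all entries of a locked map:
 *
 *	vm_map_entry_t ent;
 *
 *	for (ent = map->header.next; ent != &map->header; ent = ent->next)
 *		printf("0x%lx-0x%lx\n", ent->start, ent->end);
 */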

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove(map, entry); \
} while (0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) 		\
		start = vm_map_min(map);        \
	if (end > vm_map_max(map))              \
		end = vm_map_max(map);          \
	if (start > end)                        \
		start = end;                    \
} while (0)

/*
 * local prototypes
 */

static vm_map_entry_t	uvm_mapent_alloc(vm_map_t);
static void		uvm_mapent_copy(vm_map_entry_t,vm_map_entry_t);
static void		uvm_mapent_free(vm_map_entry_t);
static void		uvm_map_entry_unwire(vm_map_t, vm_map_entry_t);
static void		uvm_map_reference_amap(vm_map_entry_t, int);
static void		uvm_map_unreference_amap(vm_map_entry_t, int);

int			uvm_map_spacefits(vm_map_t, vaddr_t *, vsize_t,
			    vm_map_entry_t, voff_t, vsize_t);

int _uvm_tree_sanity(vm_map_t map, const char *name);
static vsize_t		uvm_rb_subtree_space(vm_map_entry_t);

static __inline int
uvm_compare(vm_map_entry_t a, vm_map_entry_t b)
{
	if (a->start < b->start)
		return (-1);
	else if (a->start > b->start)
		return (1);

	return (0);
}


static __inline void
uvm_rb_augment(vm_map_entry_t entry)
{
	entry->space = uvm_rb_subtree_space(entry);
}

RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);

RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);

static __inline vsize_t
uvm_rb_space(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next;
	vaddr_t space;

	if ((next = entry->next) == &map->header)
		space = map->max_offset - entry->end;
	else {
		KASSERT(next);
		space = next->start - entry->end;
	}
	return (space);
}

static vsize_t
uvm_rb_subtree_space(vm_map_entry_t entry)
{
	vaddr_t space, tmp;

	space = entry->ownspace;
	if (RB_LEFT(entry, rb_entry)) {
		tmp = RB_LEFT(entry, rb_entry)->space;
		if (tmp > space)
			space = tmp;
	}

	if (RB_RIGHT(entry, rb_entry)) {
		tmp = RB_RIGHT(entry, rb_entry)->space;
		if (tmp > space)
			space = tmp;
	}

	return (space);
}
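
/*
 * Illustrative note (not in the original source): the tree is augmented
 * with per-entry free-space data.  "ownspace" is the gap between this
 * entry's end and the next entry's start; "space" is the maximum of
 * ownspace over the whole subtree rooted here.  For example, assuming a
 * map with entries A [0x1000-0x2000] and B [0x5000-0x6000] and
 * max_offset 0x9000:
 *
 *	A->ownspace == 0x3000	(gap up to B)
 *	B->ownspace == 0x3000	(gap up to max_offset)
 *	root->space == 0x3000	(largest gap in the subtree)
 *
 * uvm_map_findspace() can then reject a whole subtree with a single
 * comparison against "space".
 */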

static __inline void
uvm_rb_fixup(vm_map_t map, vm_map_entry_t entry)
{
	/* We need to traverse to the very top */
	do {
		entry->ownspace = uvm_rb_space(map, entry);
		entry->space = uvm_rb_subtree_space(entry);
	} while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
}

static __inline void
uvm_rb_insert(vm_map_t map, vm_map_entry_t entry)
{
	vaddr_t space = uvm_rb_space(map, entry);
	vm_map_entry_t tmp;

	entry->ownspace = entry->space = space;
	tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
#ifdef DIAGNOSTIC
	if (tmp != NULL)
		panic("uvm_rb_insert: duplicate entry?");
#endif
	uvm_rb_fixup(map, entry);
	if (entry->prev != &map->header)
		uvm_rb_fixup(map, entry->prev);
}

static __inline void
uvm_rb_remove(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t parent;

	parent = RB_PARENT(entry, rb_entry);
	RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
	if (entry->prev != &map->header)
		uvm_rb_fixup(map, entry->prev);
	if (parent)
		uvm_rb_fixup(map, parent);
}

#ifdef DEBUG
#define uvm_tree_sanity(x,y) _uvm_tree_sanity(x,y)
#else
#define uvm_tree_sanity(x,y)
#endif

int
_uvm_tree_sanity(vm_map_t map, const char *name)
{
	vm_map_entry_t tmp, trtmp;
	int n = 0, i = 1;

	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
		if (tmp->ownspace != uvm_rb_space(map, tmp)) {
			printf("%s: %d/%d ownspace %lx != %lx %s\n",
			    name, n + 1, map->nentries,
			    (u_long)tmp->ownspace,
			    (u_long)uvm_rb_space(map, tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
	}
	trtmp = NULL;
	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
		if (tmp->space != uvm_rb_subtree_space(tmp)) {
			printf("%s: space %lx != %lx\n",
			    name, (u_long)tmp->space,
			    (u_long)uvm_rb_subtree_space(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("%s: corrupt: 0x%lx >= 0x%lx\n",
			    name, trtmp->start, tmp->start);
			goto error;
		}
		n++;

		trtmp = tmp;
	}

	if (n != map->nentries) {
		printf("%s: nentries: %d vs %d\n",
		    name, n, map->nentries);
		goto error;
	}

	for (tmp = map->header.next; tmp && tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
		if (trtmp != tmp) {
			printf("%s: lookup: %d: %p - %p: %p\n",
			    name, i, tmp, trtmp,
			    RB_PARENT(tmp, rb_entry));
			goto error;
		}
	}

	return (0);
 error:
#ifdef	DDB
	/* handy breakpoint location for error case */
	__asm(".globl treesanity_label\ntreesanity_label:");
#endif
	return (-1);
}

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static __inline vm_map_entry_t
uvm_mapent_alloc(map)
	vm_map_t map;
{
	struct vm_map_entry *me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (map->flags & VM_MAP_INTRSAFE || cold) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me) uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (me == NULL) {
			panic("uvm_mapent_alloc: out of static map entries, "
			      "check MAX_KMAPENT (currently %d)",
			      MAX_KMAPENT);
		}
		me->flags = UVM_MAP_STATIC;
	} else if (map == kernel_map) {
		splassert(IPL_NONE);
		me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
		me->flags = UVM_MAP_KMEM;
	} else {
		splassert(IPL_NONE);
		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline void
uvm_mapent_free(me)
	vm_map_entry_t me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
		me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_STATIC) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	} else if (me->flags & UVM_MAP_KMEM) {
		splassert(IPL_NONE);
		pool_put(&uvm_map_entry_kmem_pool, me);
	} else {
		splassert(IPL_NONE);
		pool_put(&uvm_map_entry_pool, me);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(src, dst)
	vm_map_entry_t src;
	vm_map_entry_t dst;
{

	memcpy(dst, src,
	    ((char *)&src->uvm_map_entry_stop_copy) - ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static __inline void
uvm_map_reference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static __inline void
uvm_map_unreference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of vm_map_entry_t's for the kernel here.
 */

void
uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call,  UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
	    0);
	UVMCNT_INIT(uvm_mlk_call,  UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint,  UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}

	/*
	 * initialize the map-related pools.
	 */
	pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpekpl", NULL);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(map, entry, start)
	vm_map_t       map;
	vm_map_entry_t entry;
	vaddr_t    start;
{
	vm_map_entry_t new_entry;
	vaddr_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_tree_sanity(map, "clip_start entry");

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */

	/* Does not change order for the RB tree */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}

	uvm_tree_sanity(map, "clip_start leave");
}
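
/*
 * Illustrative example (not in the original source): clipping entry
 * E [0x2000-0x6000] at start address 0x4000 yields two entries,
 *
 *	new_entry [0x2000-0x4000]  (inserted before E)
 *	E         [0x4000-0x6000]  (now begins exactly at "start")
 *
 * with object offsets and amap references adjusted by new_adj so each
 * half keeps mapping the same backing pages it did before the split.
 */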

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	vm_map_t	map;
	vm_map_entry_t	entry;
	vaddr_t	end;
{
	vm_map_entry_t	new_entry;
	vaddr_t new_adj; /* #bytes we move start forward */

	uvm_tree_sanity(map, "clip_end entry");
	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_rb_fixup(map, entry);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
	uvm_tree_sanity(map, "clip_end leave");
}


/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset> 		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we try to align the virtual address to
 *	the specified alignment.  this is only a hint; if we can't
 *	do it, the address will be unaligned.  this is provided as
 *	a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(map, startp, size, uobj, uoffset, align, flags)
	vm_map_t map;
	vaddr_t *startp;	/* IN/OUT */
	vsize_t size;
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	uvm_flag_t flags;
{
	vm_map_entry_t prev_entry, new_entry;
	vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
	    UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	uvm_tree_sanity(map, "map entry");

	/*
	 * step 0: sanity check of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return(KERN_PROTECTION_FAILURE);
	}

	/*
	 * step 1: figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK)
			return(KERN_FAILURE);
		vm_map_lock(map); /* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, align, flags)) == NULL) {
		UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

#ifdef PMAP_GROWKERNEL
	{
		/*
		 * If the kernel pmap can't map the requested space,
		 * then allocate more resources for it.
		 */
		if (map == kernel_map && uvm_maxkaddr < (*startp + size))
			uvm_maxkaddr = pmap_growkernel(*startp + size);
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).   the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = *startp - vm_map_min(kernel_map);
		}
	}

	/*
	 * step 2: try and insert in map by extending previous entry, if
	 * possible
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->end == *startp && prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto step3;

		if (UVM_ET_ISSUBMAP(prev_entry))
			goto step3;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto step3;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto step3;

		/* wiring status must match (new area is unwired) */
		if (VM_MAPENT_ISWIRED(prev_entry))
			goto step3;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto step3;
		}

		/* got it! */

		UVMCNT_INCR(map_backmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (prev_entry->aref.ar_amap) {
			amap_extend(prev_entry, size);
		}

		prev_entry->end += size;
		uvm_rb_fixup(map, prev_entry);
		map->size += size;

		uvm_tree_sanity(map, "map leave 2");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		vm_map_unlock(map);
		return (KERN_SUCCESS);

	}
step3:
	UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);

	/*
	 * check for possible forward merge (which we don't do) and count
	 * the number of times we missed a *possible* chance to merge more
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->next != &map->header &&
	    prev_entry->next->start == (*startp + size))
		UVMCNT_INCR(map_forwmerge);

	/*
	 * step 3: allocate new entry and link it in
	 */

	new_entry = uvm_mapent_alloc(map);
	new_entry->start = *startp;
	new_entry->end = new_entry->start + size;
	new_entry->object.uvm_obj = uobj;
	new_entry->offset = uoffset;

	if (uobj)
		new_entry->etype = UVM_ET_OBJ;
	else
		new_entry->etype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		new_entry->etype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			new_entry->etype |= UVM_ET_NEEDSCOPY;
	}

	new_entry->protection = prot;
	new_entry->max_protection = maxprot;
	new_entry->inheritance = inherit;
	new_entry->wired_count = 0;
	new_entry->advice = advice;
	if (flags & UVM_FLAG_OVERLAY) {
		/*
		 * to_add: for BSS we overallocate a little since we
		 * are likely to extend
		 */
		vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
			UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
		struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = amap;
	} else {
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = NULL;
	}

	uvm_map_entry_link(map, prev_entry, new_entry);

	map->size += size;

	/*
	 *      Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	uvm_tree_sanity(map, "map leave");

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
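
/*
 * Illustrative example (not in the original source): a minimal sketch of
 * an anonymous, copy-on-write kernel mapping established with uvm_map().
 * The variable names are assumptions, not code from this file:
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	if (uvm_map(kernel_map, &va, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW)) != KERN_SUCCESS)
 *		return (ENOMEM);
 *
 * <NULL,UVM_UNKNOWN_OFFSET> selects case [2] above: no backing object
 * and no PMAP_PREFER hint; on success *startp (here va) holds the
 * chosen virtual address.
 */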

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t
uvm_map_lookup_entry(map, address, entry)
	vm_map_t	map;
	vaddr_t	address;
	vm_map_entry_t		*entry;		/* OUT */
{
	vm_map_entry_t		cur;
	vm_map_entry_t		last;
	int			use_tree = 0;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {
		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return (TRUE);
		}

		if (map->nentries > 30)
			use_tree = 1;
	} else {
		/*
		 * go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
		use_tree = 1;
	}

	uvm_tree_sanity(map, __func__);

	if (use_tree) {
		vm_map_entry_t prev = &map->header;
		cur = RB_ROOT(&map->rbhead);

		/*
		 * Simple lookup in the tree.  Happens when the hint is
		 * invalid, or nentries reach a threshold.
		 */
		while (cur) {
			if (address >= cur->start) {
				if (address < cur->end) {
					*entry = cur;
					SAVE_HINT(map, map->hint, cur);
					return (TRUE);
				}
				prev = cur;
				cur = RB_RIGHT(cur, rb_entry);
			} else
				cur = RB_LEFT(cur, rb_entry);
		}
		*entry = prev;
		UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
		return (FALSE);
	}

	/*
	 * search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, map->hint, cur);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
					cur, 0, 0, 0);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}

	*entry = cur->prev;
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	return (FALSE);
}
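
/*
 * Illustrative example (not in the original source): a minimal sketch of
 * the usual lookup pattern under a read lock; "map" and "va" are
 * assumptions:
 *
 *	vm_map_entry_t ent;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &ent))
 *		...	(va lies inside [ent->start, ent->end))
 *	else
 *		...	(ent is the entry just before the gap holding va)
 *	vm_map_unlock_read(map);
 */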

/*
 * Checks if the address pointed to by phint fits into the empty
 * space before the vm_map_entry "after".  Takes alignment and
 * offset into consideration.
 */

int
uvm_map_spacefits(vm_map_t map, vaddr_t *phint, vsize_t length,
    vm_map_entry_t after, voff_t uoffset, vsize_t align)
{
	vaddr_t hint = *phint;
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push hint forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */
	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, &hint);
#endif
	if (align != 0)
		if ((hint & (align - 1)) != 0)
			hint = roundup(hint, align);
	*phint = hint;

	end = hint + length;
	if (end > map->max_offset || end < hint)
		return (FALSE);
	if (after != NULL && after != &map->header && after->start < end)
		return (FALSE);

	return (TRUE);
}
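
/*
 * Illustrative example (not in the original source): since align is
 * required to be a power of two, (hint & (align - 1)) extracts the
 * misaligned low bits.  E.g. with hint == 0x12345 and align == 0x1000:
 *
 *	0x12345 & 0xfff == 0x345	(misaligned, so round up)
 *	roundup(0x12345, 0x1000) == 0x13000
 *
 * The "end < hint" test afterwards catches address-space wraparound
 * when hint + length overflows.
 */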

/*
 * uvm_map_hint: return the beginning of the best area suitable for
 * creating a new mapping with "prot" protection.
 */
vaddr_t
uvm_map_hint(struct proc *p, vm_prot_t prot)
{
#ifdef __i386__
	/*
	 * If executable skip first two pages, otherwise start
	 * after data + heap region.
	 */
	if ((prot & VM_PROT_EXECUTE) &&
	    ((vaddr_t)p->p_vmspace->vm_daddr >= I386_MAX_EXE_ADDR))
		return (round_page(PAGE_SIZE*2));
#endif
	return (round_page((vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ));
}

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if `align' is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

vm_map_entry_t
uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
	vm_map_t map;
	vaddr_t hint;
	vsize_t length;
	vaddr_t *result; /* OUT */
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	int flags;
{
	vm_map_entry_t entry, next, tmp;
	vm_map_entry_t child, prev = NULL;

	vaddr_t end, orig_hint;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
		    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	uvm_tree_sanity(map, "map_findspace entry");

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < map->min_offset) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
				hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist,"<- fixed & VA in use",
				    0, 0, 0, 0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	if (flags & UVM_FLAG_FIXED) {
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			goto error;
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			goto found;
		UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
		return(NULL); /* only one shot at it ... */
	}

	/* Try to find the space in the red-black tree */

	/* Check slot before any entry */
	if (uvm_map_spacefits(map, &hint, length, entry->next, uoffset, align))
		goto found;

	/* If there is not enough space in the whole tree, we fail */
	tmp = RB_ROOT(&map->rbhead);
	if (tmp == NULL || tmp->space < length)
		goto error;

	/* Find an entry close to hint that has enough space */
	for (; tmp;) {
		if (tmp->end >= hint &&
		    (prev == NULL || tmp->end < prev->end)) {
			if (tmp->ownspace >= length)
				prev = tmp;
			else if ((child = RB_RIGHT(tmp, rb_entry)) != NULL &&
			    child->space >= length)
				prev = tmp;
		}
		if (tmp->end < hint)
			child = RB_RIGHT(tmp, rb_entry);
		else if (tmp->end > hint)
			child = RB_LEFT(tmp, rb_entry);
		else {
			if (tmp->ownspace >= length)
				break;
			child = RB_RIGHT(tmp, rb_entry);
		}
		if (child == NULL || child->space < length)
			break;
		tmp = child;
	}

	if (tmp != NULL && hint < tmp->end + tmp->ownspace) {
		/*
		 * Check if the entry that we found satisfies the
		 * space requirement
		 */
		if (hint < tmp->end)
			hint = tmp->end;
		if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset,
			align)) {
			entry = tmp;
			goto found;
		} else if (tmp->ownspace >= length)
			goto listsearch;
	}
	if (prev == NULL)
		goto error;

	hint = prev->end;
	if (uvm_map_spacefits(map, &hint, length, prev->next, uoffset,
		align)) {
		entry = prev;
		goto found;
	} else if (prev->ownspace >= length)
		goto listsearch;

	tmp = RB_RIGHT(prev, rb_entry);
	for (;;) {
		KASSERT(tmp && tmp->space >= length);
		child = RB_LEFT(tmp, rb_entry);
		if (child && child->space >= length) {
			tmp = child;
			continue;
		}
		if (tmp->ownspace >= length)
			break;
		tmp = RB_RIGHT(tmp, rb_entry);
	}

	hint = tmp->end;
	if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset, align)) {
		entry = tmp;
		goto found;
	}

	/*
	 * The tree fails to find an entry because of offset or alignment
	 * restrictions.  Search the list instead.
	 */
 listsearch:
	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end   = base VA of current gap,
	 *	 next->start  = VA of end of current gap
	 */
	for (;; hint = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */
		if (uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		if (align != 0) {
			if ((hint & (align - 1)) != 0)
				hint = roundup(hint, align);
			/*
			 * XXX Should we PMAP_PREFER() here again?
			 */
		}
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			goto error;
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
 found:
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	return (entry);

 error:
	if (align != 0) {
		UVMHIST_LOG(maphist,
		    "calling recursively, no align",
		    0,0,0,0);
		return (uvm_map_findspace(map, orig_hint,
			    length, result, uobj, uoffset, 0, flags));
	}
	return (NULL);
}
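
/*
 * Illustrative example (not in the original source): uvm_map_findspace()
 * returns the entry *after which* the new range fits, which is exactly
 * what uvm_map_entry_link() wants as its "after_where" argument.  A
 * minimal sketch of the pattern uvm_map() itself uses:
 *
 *	vaddr_t va = hint;
 *	vm_map_entry_t prev;
 *
 *	prev = uvm_map_findspace(map, hint, len, &va, NULL,
 *	    UVM_UNKNOWN_OFFSET, 0, 0);
 *	if (prev == NULL)
 *		...	(no space: fail with KERN_NO_SPACE)
 *	...		(initialize a new entry covering [va, va+len))
 *	uvm_map_entry_link(map, prev, new_entry);
 */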

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */

void
uvm_unmap_remove(map, start, end, entry_list)
	vm_map_t map;
	vaddr_t start,end;
	vm_map_entry_t *entry_list;	/* OUT */
{
	vm_map_entry_t entry, first_entry, next;
	vaddr_t len;
	UVMHIST_FUNC("uvm_unmap_remove");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);

	VM_MAP_RANGE_CHECK(map, start, end);

	uvm_tree_sanity(map, "unmap_remove entry");

	/*
	 * find first entry
	 */
	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
		/* clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */
	first_entry = NULL;
	*entry_list = NULL;		/* to be safe */

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could end
	 * up with a mapping to a page on the free list which would be very bad)
	 */

	while ((entry != &map->header) && (entry->start < end)) {

		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (VM_MAPENT_ISWIRED(entry))
			uvm_map_entry_unwire(map, entry);

		/*
		 * special case: handle mappings to anonymous kernel objects.
		 * we want to free these pages right away...
		 */
		if (UVM_ET_ISOBJ(entry) &&
		    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - drops the swap slot
			 *     - uvm_pagefree the page
			 *
			 * note there is a version of uvm_km_pgremove() that
			 * is used for "intrsafe" objects.
			 */

			/*
			 * remove mappings from pmap and drop the pages
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */
			if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) {
				pmap_kremove(entry->start, len);
				uvm_km_pgremove_intrsafe(entry->object.uvm_obj,
				    entry->start - vm_map_min(kernel_map),
				    entry->end - vm_map_min(kernel_map));
			} else {
				pmap_remove(pmap_kernel(), entry->start,
				    entry->end);
				uvm_km_pgremove(entry->object.uvm_obj,
				    entry->start - vm_map_min(kernel_map),
				    entry->end - vm_map_min(kernel_map));
			}

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */
			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;	/* to be safe */

		} else {
			/*
			 * remove mappings the standard way.
			 */
			pmap_remove(map->pmap, entry->start, entry->end);
		}

		/*
		 * remove entry from map and put it on our list of entries
		 * that we've nuked.  then go do next entry.
		 */
		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);

		/* critical! prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;		/* next entry, please */
	}

	uvm_tree_sanity(map, "unmap_remove leave");

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
}
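
/*
 * Illustrative example (not in the original source): the two-phase
 * unmap pattern the comments above describe.  Mappings are torn out
 * with the map locked, but object references are dropped only after
 * the lock is released, so pager I/O never blocks other map users.
 * A minimal sketch, assuming page-aligned start/end:
 *
 *	vm_map_entry_t dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */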

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(first_entry, flags)
	vm_map_entry_t first_entry;
	int flags;
{
	vm_map_entry_t next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {
		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj,
		    UVM_ET_ISSUBMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			uvm_map_unreference_amap(first_entry, flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		if (UVM_ET_ISSUBMAP(first_entry)) {
			/* ... unlikely to happen, but play it safe */
			uvm_map_deallocate(first_entry->object.sub_map);
		} else {
			if (UVM_ET_ISOBJ(first_entry) &&
			    first_entry->object.uvm_obj->pgops->pgo_detach)
				first_entry->object.uvm_obj->pgops->
				    pgo_detach(first_entry->object.uvm_obj);
		}

		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

int
uvm_map_reserve(map, size, offset, align, raddr)
	vm_map_t map;
	vsize_t size;
	vaddr_t offset;	/* hint for pmap_prefer */
	vsize_t align;	/* alignment hint */
	vaddr_t *raddr;	/* IN:hint, OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map,size,offset,raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);                /* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return (FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return (TRUE);
}
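
/*
 * Illustrative example (not in the original source): uvm_map_extract()
 * below uses this to hold a destination range before filling it in with
 * uvm_map_replace().  A minimal sketch, variable names assumed:
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	if (uvm_map_reserve(kernel_map, len, UVM_UNKNOWN_OFFSET, 0, &va)
 *	    == FALSE)
 *		return (ENOMEM);
 *	...	(va now names a PROT_NONE placeholder entry)
 */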

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */

int
uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vaddr_t start, end;
	vm_map_entry_t newents;
	int nnewents;
{
	vm_map_entry_t oldent, last;

	uvm_tree_sanity(map, "map_replace entry");

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return (FALSE);
	}

#ifdef DIAGNOSTIC
	/*
	 * sanity check the newents chain
	 */
	{
		vm_map_entry_t tmpent = newents;
		int nent = 0;
		vaddr_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, "
				    "tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {
		last = newents->prev;		/* we expect this */

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;

		/* Fix RB tree */
		uvm_rb_remove(map, oldent);

		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

		/* Fixup the RB tree */
		{
			int i;
			vm_map_entry_t tmp;

			tmp = newents;
			for (i = 0; i < nnewents && tmp; i++) {
				uvm_rb_insert(map, tmp);
				tmp = tmp->next;
			}
		}
	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}


	uvm_tree_sanity(map, "map_replace leave");

	/*
	 * now we can free the old blank entry, unlock the map and return.
	 */

	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 *	(maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *             be used from within the kernel in a kernel level map <<<
 */
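
/*
 * Illustrative example (not in the original source): a kernel-level
 * caller pulling len bytes at va from a process map into kernel_map
 * with quick references; the variable names are assumptions:
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, va, len,
 *	    kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return (error);
 *
 * Per the notes above, QREF extractions must later be torn down via the
 * QREF path, and REMOVE may not be combined with CONTIG or QREF.
 */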
1753 
1754 int
1755 uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
1756 	vm_map_t srcmap, dstmap;
1757 	vaddr_t start, *dstaddrp;
1758 	vsize_t len;
1759 	int flags;
1760 {
1761 	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
1762 	    oldstart;
1763 	vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
1764 	vm_map_entry_t oldentry;
1765 	vsize_t elen;
1766 	int nchain, error, copy_ok;
1767 	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
1768 
1769 	UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
1770 	    len,0);
1771 	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
1772 
1773 	uvm_tree_sanity(srcmap, "map_extract src enter");
1774 	uvm_tree_sanity(dstmap, "map_extract dst enter");
1775 
1776 	/*
1777 	 * step 0: sanity check: start must be on a page boundary, length
1778 	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
1779 	 * REMOVE.
1780 	 */
1781 
1782 	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
1783 	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
1784 		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
1785 
1786 	/*
1787 	 * step 1: reserve space in the target map for the extracted area
1788 	 */
1789 
1790 	dstaddr = vm_map_min(dstmap);
1791 	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
1792 		return(ENOMEM);
1793 	*dstaddrp = dstaddr;	/* pass address back to caller */
1794 	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);
1795 
1796 	/*
1797 	 * step 2: setup for the extraction process loop by init'ing the
1798 	 * map entry chain, locking src map, and looking up the first useful
1799 	 * entry in the map.
1800 	 */
1801 
1802 	end = start + len;
1803 	newend = dstaddr + len;
1804 	chain = endchain = NULL;
1805 	nchain = 0;
1806 	vm_map_lock(srcmap);
1807 
1808 	if (uvm_map_lookup_entry(srcmap, start, &entry)) {
1809 
1810 		/* "start" is within an entry */
1811 		if (flags & UVM_EXTRACT_QREF) {
1812 
1813 			/*
1814 			 * for quick references we don't clip the entry, so
1815 			 * the entry may map space "before" the starting
1816 			 * virtual address... this is the "fudge" factor
1817 			 * (which can be non-zero only the first time
1818 			 * through the "while" loop in step 3).
1819 			 */
1820 
1821 			fudge = start - entry->start;
1822 		} else {
1823 
1824 			/*
1825 			 * normal reference: we clip the map to fit (thus
1826 			 * fudge is zero)
1827 			 */
1828 
1829 			UVM_MAP_CLIP_START(srcmap, entry, start);
1830 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
1831 			fudge = 0;
1832 		}
1833 	} else {
1834 
1835 		/* "start" is not within an entry ... skip to next entry */
1836 		if (flags & UVM_EXTRACT_CONTIG) {
1837 			error = EINVAL;
1838 			goto bad;    /* definite hole here ... */
1839 		}
1840 
1841 		entry = entry->next;
1842 		fudge = 0;
1843 	}
1844 
1845 	/* save values from srcmap for step 6 */
1846 	orig_entry = entry;
1847 	orig_fudge = fudge;
1848 
1849 	/*
1850 	 * step 3: now start looping through the map entries, extracting
1851 	 * as we go.
1852 	 */
1853 
1854 	while (entry->start < end && entry != &srcmap->header) {
1855 
1856 		/* if we are not doing a quick reference, clip it */
1857 		if ((flags & UVM_EXTRACT_QREF) == 0)
1858 			UVM_MAP_CLIP_END(srcmap, entry, end);
1859 
1860 		/* clear needs_copy (allow chunking) */
1861 		if (UVM_ET_ISNEEDSCOPY(entry)) {
1862 			if (fudge)
1863 				oldstart = entry->start;
1864 			else
1865 				oldstart = 0;	/* XXX: gcc */
1866 			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
1867 			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
1868 				error = ENOMEM;
1869 				goto bad;
1870 			}
1871 
1872 			/* amap_copy could clip (during chunk)!  update fudge */
1873 			if (fudge) {
1874 				fudge = fudge - (entry->start - oldstart);
1875 				orig_fudge = fudge;
1876 			}
1877 		}
1878 
1879 		/* calculate the offset of this from "start" */
1880 		oldoffset = (entry->start + fudge) - start;
1881 
1882 		/* allocate a new map entry */
1883 		newentry = uvm_mapent_alloc(dstmap);
1884 		if (newentry == NULL) {
1885 			error = ENOMEM;
1886 			goto bad;
1887 		}
1888 
1889 		/* set up new map entry */
1890 		newentry->next = NULL;
1891 		newentry->prev = endchain;
1892 		newentry->start = dstaddr + oldoffset;
1893 		newentry->end =
1894 		    newentry->start + (entry->end - (entry->start + fudge));
1895 		if (newentry->end > newend || newentry->end < newentry->start)
1896 			newentry->end = newend;
1897 		newentry->object.uvm_obj = entry->object.uvm_obj;
1898 		if (newentry->object.uvm_obj) {
1899 			if (newentry->object.uvm_obj->pgops->pgo_reference)
1900 				newentry->object.uvm_obj->pgops->
1901 				    pgo_reference(newentry->object.uvm_obj);
1902 			newentry->offset = entry->offset + fudge;
1903 		} else {
1904 			newentry->offset = 0;
1905 		}
1906 		newentry->etype = entry->etype;
1907 		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
1908 			entry->max_protection : entry->protection;
1909 		newentry->max_protection = entry->max_protection;
1910 		newentry->inheritance = entry->inheritance;
1911 		newentry->wired_count = 0;
1912 		newentry->aref.ar_amap = entry->aref.ar_amap;
1913 		if (newentry->aref.ar_amap) {
1914 			newentry->aref.ar_pageoff =
1915 			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
1916 			uvm_map_reference_amap(newentry, AMAP_SHARED |
1917 			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
1918 		} else {
1919 			newentry->aref.ar_pageoff = 0;
1920 		}
1921 		newentry->advice = entry->advice;
1922 
1923 		/* now link it on the chain */
1924 		nchain++;
1925 		if (endchain == NULL) {
1926 			chain = endchain = newentry;
1927 		} else {
1928 			endchain->next = newentry;
1929 			endchain = newentry;
1930 		}
1931 
1932 		/* end of 'while' loop! */
1933 		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
1934 		    (entry->next == &srcmap->header ||
1935 		    entry->next->start != entry->end)) {
1936 			error = EINVAL;
1937 			goto bad;
1938 		}
1939 		entry = entry->next;
1940 		fudge = 0;
1941 	}
1942 
1943 	/*
1944 	 * step 4: close off chain (in format expected by uvm_map_replace)
1945 	 */
1946 
1947 	if (chain)
1948 		chain->prev = endchain;
1949 
1950 	/*
1951 	 * step 5: attempt to lock the dest map so we can pmap_copy.
1952 	 * note usage of copy_ok:
1953 	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
1954 	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
1955 	 */
1956 
1957 	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
1958 		copy_ok = 1;
1959 		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
1960 		    nchain)) {
1961 			if (srcmap != dstmap)
1962 				vm_map_unlock(dstmap);
1963 			error = EIO;
1964 			goto bad;
1965 		}
1966 	} else {
1967 		copy_ok = 0;
1968 		/* replace deferred until step 7 */
1969 	}
1970 
1971 	/*
1972 	 * step 6: traverse the srcmap a second time to do the following:
1973 	 *  - if we got a lock on the dstmap do pmap_copy
1974 	 *  - if UVM_EXTRACT_REMOVE remove the entries
1975 	 * we make use of orig_entry and orig_fudge (saved in step 2)
1976 	 */
1977 
1978 	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
1979 
1980 		/* purge possible stale hints from srcmap */
1981 		if (flags & UVM_EXTRACT_REMOVE) {
1982 			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
1983 			if (srcmap->first_free->start >= start)
1984 				srcmap->first_free = orig_entry->prev;
1985 		}
1986 
1987 		entry = orig_entry;
1988 		fudge = orig_fudge;
1989 		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */
1990 
1991 		while (entry->start < end && entry != &srcmap->header) {
1992 			if (copy_ok) {
1993 				oldoffset = (entry->start + fudge) - start;
1994 				elen = MIN(end, entry->end) -
1995 				    (entry->start + fudge);
1996 				pmap_copy(dstmap->pmap, srcmap->pmap,
1997 				    dstaddr + oldoffset, elen,
1998 				    entry->start + fudge);
1999 			}
2000 
2001 			/* we advance "entry" in the following if statement */
2002 			if (flags & UVM_EXTRACT_REMOVE) {
2003 				pmap_remove(srcmap->pmap, entry->start,
2004 				    entry->end);
2005 				oldentry = entry;	/* save entry */
2006 				entry = entry->next;	/* advance */
2007 				uvm_map_entry_unlink(srcmap, oldentry);
2008 							/* add to dead list */
2009 				oldentry->next = deadentry;
2010 				deadentry = oldentry;
2011 			} else {
2012 				entry = entry->next;	/* advance */
2013 			}
2014 
2015 			/* end of 'while' loop */
2016 			fudge = 0;
2017 		}
2018 
2019 		/*
2020 		 * unlock dstmap.  we will dispose of deadentry in
2021 		 * step 7 if needed
2022 		 */
2023 
2024 		if (copy_ok && srcmap != dstmap)
2025 			vm_map_unlock(dstmap);
2026 
2027 	}
2028 	else
2029 		deadentry = NULL; /* XXX: gcc */
2030 
2031 	/*
2032 	 * step 7: we are done with the source map, unlock.   if copy_ok
2033 	 * is 0 then we have not replaced the dummy mapping in dstmap yet
2034 	 * and we need to do so now.
2035 	 */
2036 
2037 	vm_map_unlock(srcmap);
2038 	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2039 		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
2040 
2041 	/* now do the replacement if we didn't do it in step 5 */
2042 	if (copy_ok == 0) {
2043 		vm_map_lock(dstmap);
2044 		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2045 		    nchain);
2046 		vm_map_unlock(dstmap);
2047 
2048 		if (error == FALSE) {
2049 			error = EIO;
2050 			goto bad2;
2051 		}
2052 	}
2053 
2054 	uvm_tree_sanity(srcmap, "map_extract src leave");
2055 	uvm_tree_sanity(dstmap, "map_extract dst leave");
2056 
2057 	return(0);
2058 
2059 	/*
2060 	 * bad: failure recovery
2061 	 */
2062 bad:
2063 	vm_map_unlock(srcmap);
2064 bad2:			/* src already unlocked */
2065 	if (chain)
2066 		uvm_unmap_detach(chain,
2067 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
2068 
2069 	uvm_tree_sanity(srcmap, "map_extract src err leave");
2070 	uvm_tree_sanity(dstmap, "map_extract dst err leave");
2071 
2072 	uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
2073 	return(error);
2074 }
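
/*
 * example (editor's sketch, not part of the original source): a typical
 * kernel-level use of uvm_map_extract(), loaning part of a process'
 * address space into the kernel with quick references, in the style of
 * uvm_io.c.  "p", "uva" and "len" are assumed to be a proc pointer, a
 * page-aligned user address and a page-rounded length.
 *
 *	vaddr_t kva;
 *	vm_map_entry_t dead;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return (error);
 *	(... access the data at kva ...)
 *
 * per the QREF note above uvm_map_extract(), the range must then be
 * torn down via the QREF path rather than a plain uvm_unmap():
 *
 *	vm_map_lock(kernel_map);
 *	uvm_unmap_remove(kernel_map, kva, kva + len, &dead);
 *	vm_map_unlock(kernel_map);
 *	if (dead)
 *		uvm_unmap_detach(dead, AMAP_REFALL);
 */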
2075 
2076 /* end of extraction functions */
2077 
2078 /*
2079  * uvm_map_submap: punch down part of a map into a submap
2080  *
2081  * => only the kernel_map is allowed to be submapped
2082  * => the purpose of submapping is to break up the locking granularity
2083  *	of a larger map
2084  * => the range specified must have been mapped previously with a uvm_map()
2085  *	call [with uobj==NULL] to create a blank map entry in the main map.
2086  *	[And it had better still be blank!]
2087  * => maps which contain submaps should never be copied or forked.
2088  * => to remove a submap, use uvm_unmap() on the main map
2089  *	and then uvm_map_deallocate() the submap.
2090  * => main map must be unlocked.
2091  * => submap must have been init'd and have a zero reference count.
2092  *	[need not be locked as we don't actually reference it]
2093  */
2094 
2095 int
2096 uvm_map_submap(map, start, end, submap)
2097 	vm_map_t map, submap;
2098 	vaddr_t start, end;
2099 {
2100 	vm_map_entry_t entry;
2101 	int result;
2102 
2103 	vm_map_lock(map);
2104 
2105 	VM_MAP_RANGE_CHECK(map, start, end);
2106 
2107 	if (uvm_map_lookup_entry(map, start, &entry)) {
2108 		UVM_MAP_CLIP_START(map, entry, start);
2109 		UVM_MAP_CLIP_END(map, entry, end);		/* to be safe */
2110 	} else {
2111 		entry = NULL;
2112 	}
2113 
2114 	if (entry != NULL &&
2115 	    entry->start == start && entry->end == end &&
2116 	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2117 	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
2118 		entry->etype |= UVM_ET_SUBMAP;
2119 		entry->object.sub_map = submap;
2120 		entry->offset = 0;
2121 		uvm_map_reference(submap);
2122 		result = KERN_SUCCESS;
2123 	} else {
2124 		result = KERN_INVALID_ARGUMENT;
2125 	}
2126 	vm_map_unlock(map);
2127 	return(result);
2128 }
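
/*
 * example (editor's sketch, not part of the original source): the
 * expected calling sequence for uvm_map_submap().  the caller first
 * reserves a blank range in kernel_map with uvm_map() and uobj == NULL,
 * then punches the submap in over exactly that range.  "submap" is
 * assumed to have been created elsewhere (e.g. with uvm_map_create())
 * with a zero reference count.
 *
 *	vaddr_t va = vm_map_min(kernel_map);
 *
 *	if (uvm_map(kernel_map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS)
 *		panic("submap: no space");
 *	if (uvm_map_submap(kernel_map, va, va + size, submap)
 *	    != KERN_SUCCESS)
 *		panic("submap: insert failed");
 */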
2129 
2130 
2131 /*
2132  * uvm_map_protect: change map protection
2133  *
2134  * => set_max means set max_protection.
2135  * => map must be unlocked.
2136  */
2137 
2138 #define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
2139 			 ~VM_PROT_WRITE : VM_PROT_ALL)
2140 #define max(a,b)        ((a) > (b) ? (a) : (b))
2141 
2142 int
2143 uvm_map_protect(map, start, end, new_prot, set_max)
2144 	vm_map_t map;
2145 	vaddr_t start, end;
2146 	vm_prot_t new_prot;
2147 	boolean_t set_max;
2148 {
2149 	vm_map_entry_t current, entry;
2150 	int rv = KERN_SUCCESS;
2151 	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2152 	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
2153 		    map, start, end, new_prot);
2154 
2155 	vm_map_lock(map);
2156 
2157 	VM_MAP_RANGE_CHECK(map, start, end);
2158 
2159 	if (uvm_map_lookup_entry(map, start, &entry)) {
2160 		UVM_MAP_CLIP_START(map, entry, start);
2161 	} else {
2162 		entry = entry->next;
2163 	}
2164 
2165 	/*
2166 	 * make a first pass to check for protection violations.
2167 	 */
2168 
2169 	current = entry;
2170 	while ((current != &map->header) && (current->start < end)) {
2171 		if (UVM_ET_ISSUBMAP(current)) {
2172 			rv = KERN_INVALID_ARGUMENT;
2173 			goto out;
2174 		}
2175 		if ((new_prot & current->max_protection) != new_prot) {
2176 			rv = KERN_PROTECTION_FAILURE;
2177 			goto out;
2178 		}
2179 		current = current->next;
2180 	}
2181 
2182 	/* go back and fix up protections (no need to clip this time). */
2183 
2184 	current = entry;
2185 
2186 	while ((current != &map->header) && (current->start < end)) {
2187 		vm_prot_t old_prot;
2188 
2189 		UVM_MAP_CLIP_END(map, current, end);
2190 
2191 		old_prot = current->protection;
2192 		if (set_max)
2193 			current->protection =
2194 			    (current->max_protection = new_prot) & old_prot;
2195 		else
2196 			current->protection = new_prot;
2197 
2198 		/*
2199 		 * update physical map if necessary.  worry about copy-on-write
2200 		 * here -- CHECK THIS XXX
2201 		 */
2202 
2203 		if (current->protection != old_prot) {
2204 			/* update pmap! */
2205 			pmap_protect(map->pmap, current->start, current->end,
2206 			    current->protection & MASK(current));
2207 		}
2208 
2209 		/*
2210 		 * If the map is configured to lock any future mappings,
2211 		 * wire this entry now if the old protection was VM_PROT_NONE
2212 		 * and the new protection is not VM_PROT_NONE.
2213 		 */
2214 
2215 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
2216 		    VM_MAPENT_ISWIRED(current) == 0 &&
2217 		    old_prot == VM_PROT_NONE &&
2218 		    new_prot != VM_PROT_NONE) {
2219 			if (uvm_map_pageable(map, current->start,
2220 			    current->end, FALSE,
2221 			    UVM_LK_ENTER|UVM_LK_EXIT) != KERN_SUCCESS) {
2222 				/*
2223 				 * If locking the entry fails, remember the
2224 				 * error if it's the first one.  Note we
2225 				 * still continue setting the protection in
2226 				 * the map, but will return the resource
2227 				 * shortage condition regardless.
2228 				 *
2229 				 * XXX Ignore what the actual error is,
2230 				 * XXX just call it a resource shortage
2231 				 * XXX so that it doesn't get confused
2232 				 * XXX what uvm_map_protect() itself would
2233 				 * XXX normally return.
2234 				 */
2235 				rv = KERN_RESOURCE_SHORTAGE;
2236 			}
2237 		}
2238 
2239 		current = current->next;
2240 	}
2241 
2242  out:
2243 	vm_map_unlock(map);
2244 	UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0);
2245 	return (rv);
2246 }
2247 
2248 #undef  max
2249 #undef  MASK
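
/*
 * example (editor's sketch, not part of the original source): an
 * mprotect(2)-style use of uvm_map_protect(), lowering the current
 * protection on a page-aligned range without touching max_protection.
 * the errno mapping shown is only one plausible choice.
 *
 *	switch (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
 *	    VM_PROT_READ, FALSE)) {
 *	case KERN_SUCCESS:
 *		return (0);
 *	case KERN_PROTECTION_FAILURE:
 *		return (EACCES);	(new_prot exceeds max_protection)
 *	default:
 *		return (EINVAL);
 *	}
 */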
2250 
2251 /*
2252  * uvm_map_inherit: set inheritance code for range of addrs in map.
2253  *
2254  * => map must be unlocked
2255  * => note that the inherit code is used during a "fork".  see fork
2256  *	code for details.
2257  */
2258 
2259 int
2260 uvm_map_inherit(map, start, end, new_inheritance)
2261 	vm_map_t map;
2262 	vaddr_t start;
2263 	vaddr_t end;
2264 	vm_inherit_t new_inheritance;
2265 {
2266 	vm_map_entry_t entry, temp_entry;
2267 	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
2268 	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
2269 	    map, start, end, new_inheritance);
2270 
2271 	switch (new_inheritance) {
2272 	case MAP_INHERIT_NONE:
2273 	case MAP_INHERIT_COPY:
2274 	case MAP_INHERIT_SHARE:
2275 		break;
2276 	default:
2277 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2278 		return (KERN_INVALID_ARGUMENT);
2279 	}
2280 
2281 	vm_map_lock(map);
2282 
2283 	VM_MAP_RANGE_CHECK(map, start, end);
2284 
2285 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2286 		entry = temp_entry;
2287 		UVM_MAP_CLIP_START(map, entry, start);
2288 	} else {
2289 		entry = temp_entry->next;
2290 	}
2291 
2292 	while ((entry != &map->header) && (entry->start < end)) {
2293 		UVM_MAP_CLIP_END(map, entry, end);
2294 		entry->inheritance = new_inheritance;
2295 		entry = entry->next;
2296 	}
2297 
2298 	vm_map_unlock(map);
2299 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2300 	return(KERN_SUCCESS);
2301 }
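
/*
 * example (editor's sketch, not part of the original source): an
 * minherit(2)-style use of uvm_map_inherit(), marking a range so that
 * a later fork shares it with the child instead of copying it.  see
 * uvmspace_fork() below for how the three inherit codes are acted on.
 *
 *	if (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
 *	    MAP_INHERIT_SHARE) != KERN_SUCCESS)
 *		return (EINVAL);
 */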
2302 
2303 /*
2304  * uvm_map_advice: set advice code for range of addrs in map.
2305  *
2306  * => map must be unlocked
2307  */
2308 
2309 int
2310 uvm_map_advice(map, start, end, new_advice)
2311 	vm_map_t map;
2312 	vaddr_t start;
2313 	vaddr_t end;
2314 	int new_advice;
2315 {
2316 	vm_map_entry_t entry, temp_entry;
2317 	UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
2318 	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
2319 	    map, start, end, new_advice);
2320 
2321 	vm_map_lock(map);
2322 	VM_MAP_RANGE_CHECK(map, start, end);
2323 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
2324 		entry = temp_entry;
2325 		UVM_MAP_CLIP_START(map, entry, start);
2326 	} else {
2327 		entry = temp_entry->next;
2328 	}
2329 
2330 	/*
2331 	 * XXXJRT: disallow holes?
2332 	 */
2333 
2334 	while ((entry != &map->header) && (entry->start < end)) {
2335 		UVM_MAP_CLIP_END(map, entry, end);
2336 
2337 		switch (new_advice) {
2338 		case MADV_NORMAL:
2339 		case MADV_RANDOM:
2340 		case MADV_SEQUENTIAL:
2341 			/* nothing special here */
2342 			break;
2343 
2344 		default:
2345 			vm_map_unlock(map);
2346 			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2347 			return (KERN_INVALID_ARGUMENT);
2348 		}
2349 		entry->advice = new_advice;
2350 		entry = entry->next;
2351 	}
2352 
2353 	vm_map_unlock(map);
2354 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
2355 	return (KERN_SUCCESS);
2356 }
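
/*
 * example (editor's sketch, not part of the original source): an
 * madvise(2)-style use of uvm_map_advice(), hinting that a range will
 * be read sequentially so the pager may read ahead more aggressively.
 *
 *	if (uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
 *	    MADV_SEQUENTIAL) != KERN_SUCCESS)
 *		return (EINVAL);
 */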
2357 
2358 /*
2359  * uvm_map_pageable: sets the pageability of a range in a map.
2360  *
2361  * => wires map entries.  should not be used for transient page locking.
2362  *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
2363  * => regions specified as not pageable require lock-down (wired) memory
2364  *	and page tables.
2365  * => map must never be read-locked
2366  * => if islocked is TRUE, map is already write-locked
2367  * => we always unlock the map, since we must downgrade to a read-lock
2368  *	to call uvm_fault_wire()
2369  * => XXXCDC: check this and try and clean it up.
2370  */
2371 
2372 int
2373 uvm_map_pageable(map, start, end, new_pageable, lockflags)
2374 	vm_map_t map;
2375 	vaddr_t start, end;
2376 	boolean_t new_pageable;
2377 	int lockflags;
2378 {
2379 	vm_map_entry_t entry, start_entry, failed_entry;
2380 	int rv;
2381 #ifdef DIAGNOSTIC
2382 	u_int timestamp_save;
2383 #endif
2384 	UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
2385 	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
2386 		    map, start, end, new_pageable);
2387 	KASSERT(map->flags & VM_MAP_PAGEABLE);
2388 
2389 	if ((lockflags & UVM_LK_ENTER) == 0)
2390 		vm_map_lock(map);
2391 
2392 	VM_MAP_RANGE_CHECK(map, start, end);
2393 
2394 	/*
2395 	 * only one pageability change may take place at one time, since
2396 	 * uvm_fault_wire assumes it will be called only once for each
2397 	 * wiring/unwiring.  therefore, we have to make sure we're actually
2398 	 * changing the pageability for the entire region.  we do so before
2399 	 * making any changes.
2400 	 */
2401 
2402 	if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
2403 		if ((lockflags & UVM_LK_EXIT) == 0)
2404 			vm_map_unlock(map);
2405 
2406 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
2407 		return (KERN_INVALID_ADDRESS);
2408 	}
2409 	entry = start_entry;
2410 
2411 	/*
2412 	 * handle wiring and unwiring separately.
2413 	 */
2414 
2415 	if (new_pageable) {		/* unwire */
2416 		UVM_MAP_CLIP_START(map, entry, start);
2417 
2418 		/*
2419 		 * unwiring.  first ensure that the range to be unwired is
2420 		 * really wired down and that there are no holes.
2421 		 */
2422 
2423 		while ((entry != &map->header) && (entry->start < end)) {
2424 			if (entry->wired_count == 0 ||
2425 			    (entry->end < end &&
2426 			     (entry->next == &map->header ||
2427 			      entry->next->start > entry->end))) {
2428 				if ((lockflags & UVM_LK_EXIT) == 0)
2429 					vm_map_unlock(map);
2430 				UVMHIST_LOG(maphist,
2431 				    "<- done (INVALID UNWIRE ARG)",0,0,0,0);
2432 				return (KERN_INVALID_ARGUMENT);
2433 			}
2434 			entry = entry->next;
2435 		}
2436 
2437 		/*
2438 		 * POSIX 1003.1b - a single munlock call unlocks a region,
2439 		 * regardless of the number of mlock calls made on that
2440 		 * region.
2441 		 */
2442 
2443 		entry = start_entry;
2444 		while ((entry != &map->header) && (entry->start < end)) {
2445 			UVM_MAP_CLIP_END(map, entry, end);
2446 			if (VM_MAPENT_ISWIRED(entry))
2447 				uvm_map_entry_unwire(map, entry);
2448 			entry = entry->next;
2449 		}
2450 		if ((lockflags & UVM_LK_EXIT) == 0)
2451 			vm_map_unlock(map);
2452 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2453 		return(KERN_SUCCESS);
2454 	}
2455 
2456 	/*
2457 	 * wire case: in two passes [XXXCDC: ugly block of code here]
2458 	 *
2459 	 * 1: holding the write lock, we create any anonymous maps that need
2460 	 *    to be created.  then we clip each map entry to the region to
2461 	 *    be wired and increment its wiring count.
2462 	 *
2463 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
2464 	 *    in the pages for any newly wired area (wired_count == 1).
2465 	 *
2466 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
2467 	 *    deadlock with another thread that may have faulted on one of
2468 	 *    the pages to be wired (it would mark the page busy, blocking
2469 	 *    us, then in turn block on the map lock that we hold).  because
2470 	 *    of problems in the recursive lock package, we cannot upgrade
2471 	 *    to a write lock in vm_map_lookup.  thus, any actions that
2472 	 *    require the write lock must be done beforehand.  because we
2473 	 *    keep the read lock on the map, the copy-on-write status of the
2474 	 *    entries we modify here cannot change.
2475 	 */
2476 
2477 	while ((entry != &map->header) && (entry->start < end)) {
2478 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2479 
2480 			/*
2481 			 * perform actions of vm_map_lookup that need the
2482 			 * write lock on the map: create an anonymous map
2483 			 * for a copy-on-write region, or an anonymous map
2484 			 * for a zero-fill region.  (XXXCDC: submap case
2485 			 * ok?)
2486 			 */
2487 
2488 			if (!UVM_ET_ISSUBMAP(entry)) {  /* not submap */
2489 				if (UVM_ET_ISNEEDSCOPY(entry) &&
2490 				    ((entry->protection & VM_PROT_WRITE) ||
2491 				     (entry->object.uvm_obj == NULL))) {
2492 					amap_copy(map, entry, M_WAITOK, TRUE,
2493 					    start, end);
2494 					/* XXXCDC: wait OK? */
2495 				}
2496 			}
2497 		}
2498 		UVM_MAP_CLIP_START(map, entry, start);
2499 		UVM_MAP_CLIP_END(map, entry, end);
2500 		entry->wired_count++;
2501 
2502 		/*
2503 		 * Check for holes
2504 		 */
2505 
2506 		if (entry->protection == VM_PROT_NONE ||
2507 		    (entry->end < end &&
2508 		     (entry->next == &map->header ||
2509 		      entry->next->start > entry->end))) {
2510 
2511 			/*
2512 			 * found one.  amap creation actions do not need to
2513 			 * be undone, but the wired counts need to be restored.
2514 			 */
2515 
2516 			while (entry != &map->header && entry->end > start) {
2517 				entry->wired_count--;
2518 				entry = entry->prev;
2519 			}
2520 			if ((lockflags & UVM_LK_EXIT) == 0)
2521 				vm_map_unlock(map);
2522 			UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
2523 			return (KERN_INVALID_ARGUMENT);
2524 		}
2525 		entry = entry->next;
2526 	}
2527 
2528 	/*
2529 	 * Pass 2.
2530 	 */
2531 
2532 #ifdef DIAGNOSTIC
2533 	timestamp_save = map->timestamp;
2534 #endif
2535 	vm_map_busy(map);
2536 	vm_map_downgrade(map);
2537 
2538 	rv = 0;
2539 	entry = start_entry;
2540 	while (entry != &map->header && entry->start < end) {
2541 		if (entry->wired_count == 1) {
2542 			rv = uvm_fault_wire(map, entry->start, entry->end,
2543 			    entry->protection);
2544 			if (rv) {
2545 				/*
2546 				 * wiring failed.  break out of the loop.
2547 				 * we'll clean up the map below, once we
2548 				 * have a write lock again.
2549 				 */
2550 				break;
2551 			}
2552 		}
2553 		entry = entry->next;
2554 	}
2555 
2556 	if (rv) {        /* failed? */
2557 
2558 		/*
2559 		 * Get back to an exclusive (write) lock.
2560 		 */
2561 
2562 		vm_map_upgrade(map);
2563 		vm_map_unbusy(map);
2564 
2565 #ifdef DIAGNOSTIC
2566 		if (timestamp_save != map->timestamp)
2567 			panic("uvm_map_pageable: stale map");
2568 #endif
2569 
2570 		/*
2571 		 * first drop the wiring count on all the entries
2572 		 * which haven't actually been wired yet.
2573 		 */
2574 
2575 		failed_entry = entry;
2576 		while (entry != &map->header && entry->start < end) {
2577 			entry->wired_count--;
2578 			entry = entry->next;
2579 		}
2580 
2581 		/*
2582 		 * now, unwire all the entries that were successfully
2583 		 * wired above.
2584 		 */
2585 
2586 		entry = start_entry;
2587 		while (entry != failed_entry) {
2588 			entry->wired_count--;
2589 			if (VM_MAPENT_ISWIRED(entry) == 0)
2590 				uvm_map_entry_unwire(map, entry);
2591 			entry = entry->next;
2592 		}
2593 		if ((lockflags & UVM_LK_EXIT) == 0)
2594 			vm_map_unlock(map);
2595 		UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
2596 		return(rv);
2597 	}
2598 
2599 	/* We are holding a read lock here. */
2600 	if ((lockflags & UVM_LK_EXIT) == 0) {
2601 		vm_map_unbusy(map);
2602 		vm_map_unlock_read(map);
2603 	} else {
2604 
2605 		/*
2606 		 * Get back to an exclusive (write) lock.
2607 		 */
2608 
2609 		vm_map_upgrade(map);
2610 		vm_map_unbusy(map);
2611 	}
2612 
2613 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2614 	return(KERN_SUCCESS);
2615 }
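
/*
 * example (editor's sketch, not part of the original source): the
 * mlock(2)/munlock(2)-style uses of uvm_map_pageable().  new_pageable
 * == FALSE wires the range, TRUE unwires it; with lockflags == 0 the
 * routine takes and releases the map lock itself.
 *
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr,
 *	    addr + size, FALSE, 0);		(wire, i.e. mlock)
 *	...
 *	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr,
 *	    addr + size, TRUE, 0);		(unwire, i.e. munlock)
 *
 * the return values are KERN_* codes, as elsewhere in this file.
 */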
2616 
2617 /*
2618  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
2619  * all mapped regions.
2620  *
2621  * => map must not be locked.
2622  * => if no flags are specified, all regions are unwired.
2623  * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
2624  */
2625 
2626 int
2627 uvm_map_pageable_all(map, flags, limit)
2628 	vm_map_t map;
2629 	int flags;
2630 	vsize_t limit;
2631 {
2632 	vm_map_entry_t entry, failed_entry;
2633 	vsize_t size;
2634 	int rv;
2635 #ifdef DIAGNOSTIC
2636 	u_int timestamp_save;
2637 #endif
2638 	UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
2639 	UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
2640 
2641 	KASSERT(map->flags & VM_MAP_PAGEABLE);
2642 
2643 	vm_map_lock(map);
2644 
2645 	/*
2646 	 * handle wiring and unwiring separately.
2647 	 */
2648 
2649 	if (flags == 0) {			/* unwire */
2650 		/*
2651 		 * POSIX 1003.1b -- munlockall unlocks all regions,
2652 		 * regardless of how many times mlockall has been called.
2653 		 */
2654 		for (entry = map->header.next; entry != &map->header;
2655 		     entry = entry->next) {
2656 			if (VM_MAPENT_ISWIRED(entry))
2657 				uvm_map_entry_unwire(map, entry);
2658 		}
2659 		vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
2660 		vm_map_unlock(map);
2661 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
2662 		return (KERN_SUCCESS);
2663 
2664 		/*
2665 		 * end of unwire case!
2666 		 */
2667 	}
2668 
2669 	if (flags & MCL_FUTURE) {
2670 		/*
2671 		 * must wire all future mappings; remember this.
2672 		 */
2673 		vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
2674 	}
2675 
2676 	if ((flags & MCL_CURRENT) == 0) {
2677 		/*
2678 		 * no more work to do!
2679 		 */
2680 		UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
2681 		vm_map_unlock(map);
2682 		return (KERN_SUCCESS);
2683 	}
2684 
2685 	/*
2686 	 * wire case: in three passes [XXXCDC: ugly block of code here]
2687 	 *
2688 	 * 1: holding the write lock, count all pages mapped by non-wired
2689 	 *    entries.  if this would cause us to go over our limit, we fail.
2690 	 *
2691 	 * 2: still holding the write lock, we create any anonymous maps that
2692 	 *    need to be created.  then we increment each entry's wiring count.
2693 	 *
2694 	 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
2695 	 *    in the pages for any newly wired area (wired_count == 1).
2696 	 *
2697 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
2698 	 *    deadlock with another thread that may have faulted on one of
2699 	 *    the pages to be wired (it would mark the page busy, blocking
2700 	 *    us, then in turn block on the map lock that we hold).  because
2701 	 *    of problems in the recursive lock package, we cannot upgrade
2702 	 *    to a write lock in vm_map_lookup.  thus, any actions that
2703 	 *    require the write lock must be done beforehand.  because we
2704 	 *    keep the read lock on the map, the copy-on-write status of the
2705 	 *    entries we modify here cannot change.
2706 	 */
2707 
2708 	for (size = 0, entry = map->header.next; entry != &map->header;
2709 	     entry = entry->next) {
2710 		if (entry->protection != VM_PROT_NONE &&
2711 		    VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2712 			size += entry->end - entry->start;
2713 		}
2714 	}
2715 
2716 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
2717 		vm_map_unlock(map);
2718 		return (KERN_NO_SPACE);		/* XXX overloaded */
2719 	}
2720 
2721 	/* XXX non-pmap_wired_count case must be handled by caller */
2722 #ifdef pmap_wired_count
2723 	if (limit != 0 &&
2724 	    (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
2725 		vm_map_unlock(map);
2726 		return (KERN_NO_SPACE);		/* XXX overloaded */
2727 	}
2728 #endif
2729 
2730 	/*
2731 	 * Pass 2.
2732 	 */
2733 
2734 	for (entry = map->header.next; entry != &map->header;
2735 	     entry = entry->next) {
2736 		if (entry->protection == VM_PROT_NONE)
2737 			continue;
2738 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
2739 			/*
2740 			 * perform actions of vm_map_lookup that need the
2741 			 * write lock on the map: create an anonymous map
2742 			 * for a copy-on-write region, or an anonymous map
2743 			 * for a zero-fill region.  (XXXCDC: submap case
2744 			 * ok?)
2745 			 */
2746 			if (!UVM_ET_ISSUBMAP(entry)) {	/* not submap */
2747 				if (UVM_ET_ISNEEDSCOPY(entry) &&
2748 				    ((entry->protection & VM_PROT_WRITE) ||
2749 				     (entry->object.uvm_obj == NULL))) {
2750 					amap_copy(map, entry, M_WAITOK, TRUE,
2751 					    entry->start, entry->end);
2752 					/* XXXCDC: wait OK? */
2753 				}
2754 			}
2755 		}
2756 		entry->wired_count++;
2757 	}
2758 
2759 	/*
2760 	 * Pass 3.
2761 	 */
2762 
2763 #ifdef DIAGNOSTIC
2764 	timestamp_save = map->timestamp;
2765 #endif
2766 	vm_map_busy(map);
2767 	vm_map_downgrade(map);
2768 
2769 	rv = KERN_SUCCESS;
2770 	for (entry = map->header.next; entry != &map->header;
2771 	     entry = entry->next) {
2772 		if (entry->wired_count == 1) {
2773 			rv = uvm_fault_wire(map, entry->start, entry->end,
2774 			     entry->protection);
2775 			if (rv) {
2776 				/*
2777 				 * wiring failed.  break out of the loop.
2778 				 * we'll clean up the map below, once we
2779 				 * have a write lock again.
2780 				 */
2781 				break;
2782 			}
2783 		}
2784 	}
2785 
2786 	if (rv) {	/* failed? */
2787 		/*
2788 		 * Get back to an exclusive (write) lock.
2789 		 */
2790 		vm_map_upgrade(map);
2791 		vm_map_unbusy(map);
2792 
2793 #ifdef DIAGNOSTIC
2794 		if (timestamp_save != map->timestamp)
2795 			panic("uvm_map_pageable_all: stale map");
2796 #endif
2797 
2798 		/*
2799 		 * first drop the wiring count on all the entries
2800 		 * which haven't actually been wired yet.
2801 		 *
2802 		 * Skip VM_PROT_NONE entries like we did above.
2803 		 */
2804 		failed_entry = entry;
2805 		for (/* nothing */; entry != &map->header;
2806 		     entry = entry->next) {
2807 			if (entry->protection == VM_PROT_NONE)
2808 				continue;
2809 			entry->wired_count--;
2810 		}
2811 
2812 		/*
2813 		 * now, unwire all the entries that were successfully
2814 		 * wired above.
2815 		 *
2816 		 * Skip VM_PROT_NONE entries like we did above.
2817 		 */
2818 		for (entry = map->header.next; entry != failed_entry;
2819 		     entry = entry->next) {
2820 			if (entry->protection == VM_PROT_NONE)
2821 				continue;
2822 			entry->wired_count--;
2823 			if (VM_MAPENT_ISWIRED(entry))
2824 				uvm_map_entry_unwire(map, entry);
2825 		}
2826 		vm_map_unlock(map);
2827 		UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
2828 		return (rv);
2829 	}
2830 
2831 	/* We are holding a read lock here. */
2832 	vm_map_unbusy(map);
2833 	vm_map_unlock_read(map);
2834 
2835 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
2836 	return (KERN_SUCCESS);
2837 }
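
/*
 * example (editor's sketch, not part of the original source): the
 * mlockall(2)/munlockall(2)-style uses of uvm_map_pageable_all().
 * the limit argument would normally come from the locked-memory
 * resource limit; the rlimit expression below is an assumption.
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map,
 *	    MCL_CURRENT | MCL_FUTURE,
 *	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);	(mlockall)
 *
 *	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
 *							(munlockall)
 */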
2838 
2839 /*
2840  * uvm_map_clean: clean out a map range
2841  *
2842  * => valid flags:
2843  *   if (flags & PGO_CLEANIT): dirty pages are cleaned first
2844  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
2845  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
2846  *   if (flags & PGO_FREE): any cached pages are freed after clean
2847  * => returns an error if any part of the specified range isn't mapped
2848  * => never a need to flush amap layer since the anonymous memory has
2849  *	no permanent home, but may deactivate pages there
2850  * => called from sys_msync() and sys_madvise()
2851  * => caller must not write-lock map (read OK).
2852  * => we may sleep while cleaning if SYNCIO [with map read-locked]
2853  */
2854 
2855 int	amap_clean_works = 1;	/* XXX for now, just in case... */
2856 
2857 int
2858 uvm_map_clean(map, start, end, flags)
2859 	vm_map_t map;
2860 	vaddr_t start, end;
2861 	int flags;
2862 {
2863 	vm_map_entry_t current, entry;
2864 	struct uvm_object *uobj;
2865 	struct vm_amap *amap;
2866 	struct vm_anon *anon;
2867 	struct vm_page *pg;
2868 	vaddr_t offset;
2869 	vsize_t size;
2870 	int rv, error, refs;
2871 	UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
2872 
2873 	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
2874 		    map, start, end, flags);
2875 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
2876 		(PGO_FREE|PGO_DEACTIVATE));
2877 
2878 	vm_map_lock_read(map);
2879 	VM_MAP_RANGE_CHECK(map, start, end);
2880 	if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
2881 		vm_map_unlock_read(map);
2882 		return(KERN_INVALID_ADDRESS);
2883 	}
2884 
2885 	/*
2886 	 * Make a first pass to check for holes.
2887 	 */
2888 
2889 	for (current = entry; current->start < end; current = current->next) {
2890 		if (UVM_ET_ISSUBMAP(current)) {
2891 			vm_map_unlock_read(map);
2892 			return (KERN_INVALID_ARGUMENT);
2893 		}
2894 		if (end > current->end && (current->next == &map->header ||
2895 		    current->end != current->next->start)) {
2896 			vm_map_unlock_read(map);
2897 			return (KERN_INVALID_ADDRESS);
2898 		}
2899 	}
2900 
2901 	error = KERN_SUCCESS;
2902 
2903 	for (current = entry; current->start < end; current = current->next) {
2904 		amap = current->aref.ar_amap;	/* top layer */
2905 		uobj = current->object.uvm_obj;	/* bottom layer */
2906 		KASSERT(start >= current->start);
2907 
2908 		/*
2909 		 * No amap cleaning necessary if:
2910 		 *
2911 		 *	(1) There's no amap.
2912 		 *
2913 		 *	(2) We're not deactivating or freeing pages.
2914 		 */
2915 
2916 		if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
2917 			goto flush_object;
2918 
2919 		/* XXX for now, just in case... */
2920 		if (amap_clean_works == 0)
2921 			goto flush_object;
2922 
2923 		amap_lock(amap);
2924 		offset = start - current->start;
2925 		size = MIN(end, current->end) - start;
2926 		for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
2927 			anon = amap_lookup(&current->aref, offset);
2928 			if (anon == NULL)
2929 				continue;
2930 
2931 			simple_lock(&anon->an_lock);
2932 
2933 			pg = anon->u.an_page;
2934 			if (pg == NULL) {
2935 				simple_unlock(&anon->an_lock);
2936 				continue;
2937 			}
2938 
2939 			switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
2940 
2941 			/*
2942 			 * XXX In these first 3 cases, we always just
2943 			 * XXX deactivate the page.  We may want to
2944 			 * XXX handle the different cases more
2945 			 * XXX specifically, in the future.
2946 			 */
2947 
2948 			case PGO_CLEANIT|PGO_FREE:
2949 			case PGO_CLEANIT|PGO_DEACTIVATE:
2950 			case PGO_DEACTIVATE:
2951  deactivate_it:
2952 				/* skip the page if it's loaned or wired */
2953 				if (pg->loan_count != 0 ||
2954 				    pg->wire_count != 0) {
2955 					simple_unlock(&anon->an_lock);
2956 					continue;
2957 				}
2958 
2959 				uvm_lock_pageq();
2960 
2961 				/*
2962 				 * skip the page if it's not actually owned
2963 				 * by the anon (may simply be loaned to the
2964 				 * anon).
2965 				 */
2966 
2967 				if ((pg->pqflags & PQ_ANON) == 0) {
2968 					KASSERT(pg->uobject == NULL);
2969 					uvm_unlock_pageq();
2970 					simple_unlock(&anon->an_lock);
2971 					continue;
2972 				}
2973 				KASSERT(pg->uanon == anon);
2974 
2975 #ifdef UBC
2976 				/* ...and deactivate the page. */
2977 				pmap_clear_reference(pg);
2978 #else
2979 				/* zap all mappings for the page. */
2980 				pmap_page_protect(pg, VM_PROT_NONE);
2981 
2982 				/* ...and deactivate the page. */
2983 #endif
2984 				uvm_pagedeactivate(pg);
2985 
2986 				uvm_unlock_pageq();
2987 				simple_unlock(&anon->an_lock);
2988 				continue;
2989 
2990 			case PGO_FREE:
2991 
2992 				/*
2993 				 * If there are multiple references to
2994 				 * the amap, just deactivate the page.
2995 				 */
2996 
2997 				if (amap_refs(amap) > 1)
2998 					goto deactivate_it;
2999 
3000 				/* XXX skip the page if it's wired */
3001 				if (pg->wire_count != 0) {
3002 					simple_unlock(&anon->an_lock);
3003 					continue;
3004 				}
3005 				amap_unadd(&current->aref, offset);
3006 				refs = --anon->an_ref;
3007 				simple_unlock(&anon->an_lock);
3008 				if (refs == 0)
3009 					uvm_anfree(anon);
3010 				continue;
3011 
3012 			default:
3013 				panic("uvm_map_clean: weird flags");
3014 			}
3015 		}
3016 		amap_unlock(amap);
3017 
3018  flush_object:
3019 		/*
3020 		 * flush pages if we've got a valid backing object.
3021 		 */
3022 
3023 		offset = current->offset + (start - current->start);
3024 		size = MIN(end, current->end) - start;
3025 		if (uobj != NULL) {
3026 			simple_lock(&uobj->vmobjlock);
3027 			rv = uobj->pgops->pgo_flush(uobj, offset,
3028 			    offset + size, flags);
3029 			simple_unlock(&uobj->vmobjlock);
3030 
3031 			if (rv == FALSE)
3032 				error = KERN_FAILURE;
3033 		}
3034 		start += size;
3035 	}
3036 	vm_map_unlock_read(map);
3037 	return (error);
3038 }
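
/*
 * example (editor's sketch, not part of the original source): flag
 * combinations in the spirit of the two callers named above; the
 * exact flags used by sys_msync() and sys_madvise() may differ.
 *
 *	msync(MS_SYNC):		PGO_CLEANIT | PGO_SYNCIO
 *	msync(MS_ASYNC):	PGO_CLEANIT
 *	msync(MS_INVALIDATE):	PGO_CLEANIT | PGO_FREE
 *	madvise(MADV_DONTNEED):	PGO_DEACTIVATE
 *
 *	error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * note PGO_FREE and PGO_DEACTIVATE are mutually exclusive, per the
 * KASSERT at the top of the function.
 */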
3039 
3040 
3041 /*
3042  * uvm_map_checkprot: check protection in map
3043  *
3044  * => must allow specified protection in a fully allocated region.
3045  * => map must be read or write locked by caller.
3046  */
3047 
3048 boolean_t
3049 uvm_map_checkprot(map, start, end, protection)
3050 	vm_map_t       map;
3051 	vaddr_t    start, end;
3052 	vm_prot_t      protection;
3053 {
3054 	 vm_map_entry_t entry;
3055 	 vm_map_entry_t tmp_entry;
3056 
3057 	 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
3058 		 return(FALSE);
3059 	 }
3060 	 entry = tmp_entry;
3061 	 while (start < end) {
3062 		 if (entry == &map->header) {
3063 			 return(FALSE);
3064 		 }
3065 
3066 		/*
3067 		 * no holes allowed
3068 		 */
3069 
3070 		 if (start < entry->start) {
3071 			 return(FALSE);
3072 		 }
3073 
3074 		/*
3075 		 * check protection associated with entry
3076 		 */
3077 
3078 		 if ((entry->protection & protection) != protection) {
3079 			 return(FALSE);
3080 		 }
3081 
3082 		 /* go to next entry */
3083 
3084 		 start = entry->end;
3085 		 entry = entry->next;
3086 	 }
3087 	 return(TRUE);
3088 }
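
/*
 * example (editor's sketch, not part of the original source): a caller
 * verifying that a user-supplied range is fully mapped and writable
 * before using it, holding the read lock as required above.
 *
 *	vm_map_lock_read(map);
 *	if (!uvm_map_checkprot(map, uva, uva + len,
 *	    VM_PROT_READ | VM_PROT_WRITE)) {
 *		vm_map_unlock_read(map);
 *		return (EFAULT);
 *	}
 *	(... access the range; the check is only valid while locked ...)
 *	vm_map_unlock_read(map);
 */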
3089 
3090 /*
3091  * uvmspace_alloc: allocate a vmspace structure.
3092  *
3093  * - structure includes vm_map and pmap
3094  * - XXX: no locking on this structure
3095  * - refcnt set to 1, rest must be init'd by caller
3096  */
3097 struct vmspace *
3098 uvmspace_alloc(min, max, pageable)
3099 	vaddr_t min, max;
3100 	int pageable;
3101 {
3102 	struct vmspace *vm;
3103 	UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3104 
3105 	vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
3106 	uvmspace_init(vm, NULL, min, max, pageable);
3107 	UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3108 	return (vm);
3109 }
3110 
3111 /*
3112  * uvmspace_init: initialize a vmspace structure.
3113  *
3114  * - XXX: no locking on this structure
3115  * - refcnt set to 1, rest must be init'd by caller
3116  */
3117 void
3118 uvmspace_init(vm, pmap, min, max, pageable)
3119 	struct vmspace *vm;
3120 	struct pmap *pmap;
3121 	vaddr_t min, max;
3122 	boolean_t pageable;
3123 {
3124 	UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3125 
3126 	memset(vm, 0, sizeof(*vm));
3127 
3128 	uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0);
3129 
3130 	if (pmap)
3131 		pmap_reference(pmap);
3132 	else
3133 		pmap = pmap_create();
3134 	vm->vm_map.pmap = pmap;
3135 
3136 	vm->vm_refcnt = 1;
3137 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3138 }
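
/*
 * example (editor's sketch, not part of the original source): the
 * division of labor between uvmspace_alloc() and uvmspace_init().
 * most callers want fresh storage and a fresh pmap and simply call
 * uvmspace_alloc(); uvmspace_init() serves callers that already have
 * storage, and possibly a pmap to share (e.g. the boot-time setup of
 * proc0's statically allocated vmspace).  the address range macros
 * below are MD conventions assumed here, not taken from this file.
 *
 *	struct vmspace *vm;
 *
 *	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, TRUE);
 */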
3139 
3140 /*
3141  * uvmspace_share: share a vmspace between two processes
3142  *
3143  * - XXX: no locking on vmspace
3144  * - used for vfork, threads(?)
3145  */
3146 
3147 void
3148 uvmspace_share(p1, p2)
3149 	struct proc *p1, *p2;
3150 {
3151 	p2->p_vmspace = p1->p_vmspace;
3152 	p1->p_vmspace->vm_refcnt++;
3153 }
3154 
3155 /*
3156  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3157  *
3158  * - XXX: no locking on vmspace
3159  */
3160 
3161 void
3162 uvmspace_unshare(p)
3163 	struct proc *p;
3164 {
3165 	struct vmspace *nvm, *ovm = p->p_vmspace;
3166 
3167 	if (ovm->vm_refcnt == 1)
3168 		/* nothing to do: vmspace isn't shared in the first place */
3169 		return;
3170 
3171 	/* make a new vmspace, still holding old one */
3172 	nvm = uvmspace_fork(ovm);
3173 
3174 	pmap_deactivate(p);		/* unbind old vmspace */
3175 	p->p_vmspace = nvm;
3176 	pmap_activate(p);		/* switch to new vmspace */
3177 
3178 	uvmspace_free(ovm);		/* drop reference to old vmspace */
3179 }
3180 
3181 /*
3182  * uvmspace_exec: the process wants to exec a new program
3183  *
3184  * - XXX: no locking on vmspace
3185  */
3186 
3187 void
3188 uvmspace_exec(p, start, end)
3189 	struct proc *p;
3190 	vaddr_t start, end;
3191 {
3192 	struct vmspace *nvm, *ovm = p->p_vmspace;
3193 	vm_map_t map = &ovm->vm_map;
3194 
3195 #ifdef __sparc__
3196 	/* XXX cgd 960926: the sparc #ifdef should be a MD hook */
3197 	kill_user_windows(p);   /* before stack addresses go away */
3198 #endif
3199 
3200 	/*
3201 	 * see if more than one process is using this vmspace...
3202 	 */
3203 
3204 	if (ovm->vm_refcnt == 1) {
3205 
3206 		/*
3207 		 * if p is the only process using its vmspace then we can safely
3208 		 * recycle that vmspace for the program that is being exec'd.
3209 		 */
3210 
3211 #ifdef SYSVSHM
3212 		/*
3213 		 * SYSV SHM semantics require us to kill all segments on an exec
3214 		 */
3215 		if (ovm->vm_shm)
3216 			shmexit(ovm);
3217 #endif
3218 
3219 		/*
3220 		 * POSIX 1003.1b -- "lock future mappings" is revoked
3221 		 * when a process execs another program image.
3222 		 */
3223 		vm_map_lock(map);
3224 		vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
3225 		vm_map_unlock(map);
3226 
3227 		/*
3228 		 * now unmap the old program
3229 		 */
3230 		uvm_unmap(map, map->min_offset, map->max_offset);
3231 
3232 		/*
3233 		 * resize the map
3234 		 */
3235 		vm_map_lock(map);
3236 		map->min_offset = start;
3237 		uvm_tree_sanity(map, "resize enter");
3238 		map->max_offset = end;
3239 		if (map->header.prev != &map->header)
3240 			uvm_rb_fixup(map, map->header.prev);
3241 		uvm_tree_sanity(map, "resize leave");
3242 		vm_map_unlock(map);
3243 
3244 
3245 	} else {
3246 
3247 		/*
3248 		 * p's vmspace is being shared, so we can't reuse it for p since
3249 		 * it is still being used for others.   allocate a new vmspace
3250 		 * for p
3251 		 */
3252 		nvm = uvmspace_alloc(start, end,
3253 			 (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
3254 
3255 		/*
3256 		 * install new vmspace and drop our ref to the old one.
3257 		 */
3258 
3259 		pmap_deactivate(p);
3260 		p->p_vmspace = nvm;
3261 		pmap_activate(p);
3262 
3263 		uvmspace_free(ovm);
3264 	}
3265 }
3266 
3267 /*
3268  * uvmspace_free: free a vmspace data structure
3269  *
3270  * - XXX: no locking on vmspace
3271  */
3272 
3273 void
3274 uvmspace_free(vm)
3275 	struct vmspace *vm;
3276 {
3277 	vm_map_entry_t dead_entries;
3278 	UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
3279 
3280 	UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
3281 	if (--vm->vm_refcnt == 0) {
3282 		/*
3283 		 * lock the map, to wait out all other references to it.  delete
3284 		 * all of the mappings and pages they hold, then call the pmap
3285 		 * module to reclaim anything left.
3286 		 */
3287 #ifdef SYSVSHM
3288 		/* Get rid of any SYSV shared memory segments. */
3289 		if (vm->vm_shm != NULL)
3290 			shmexit(vm);
3291 #endif
3292 		vm_map_lock(&vm->vm_map);
3293 		if (vm->vm_map.nentries) {
3294 			uvm_unmap_remove(&vm->vm_map,
3295 			    vm->vm_map.min_offset, vm->vm_map.max_offset,
3296 			    &dead_entries);
3297 			if (dead_entries != NULL)
3298 				uvm_unmap_detach(dead_entries, 0);
3299 		}
3300 		pmap_destroy(vm->vm_map.pmap);
3301 		vm->vm_map.pmap = NULL;
3302 		pool_put(&uvm_vmspace_pool, vm);
3303 	}
3304 	UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
3305 }
3306 
3307 /*
3308  *   F O R K   -   m a i n   e n t r y   p o i n t
3309  */
3310 /*
3311  * uvmspace_fork: fork a process' main map
3312  *
3313  * => create a new vmspace for child process from parent.
3314  * => parent's map must not be locked.
3315  */
3316 
3317 struct vmspace *
3318 uvmspace_fork(vm1)
3319 	struct vmspace *vm1;
3320 {
3321 	struct vmspace *vm2;
3322 	vm_map_t        old_map = &vm1->vm_map;
3323 	vm_map_t        new_map;
3324 	vm_map_entry_t  old_entry;
3325 	vm_map_entry_t  new_entry;
3326 	pmap_t          new_pmap;
3327 	boolean_t	protect_child;
3328 	UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
3329 
3330 	vm_map_lock(old_map);
3331 
3332 	vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
3333 		      (old_map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
3334 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
3335 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
3336 	new_map = &vm2->vm_map;		  /* XXX */
3337 	new_pmap = new_map->pmap;
3338 
3339 	old_entry = old_map->header.next;
3340 
3341 	/*
3342 	 * go entry-by-entry
3343 	 */
3344 
3345 	while (old_entry != &old_map->header) {
3346 
3347 		/*
3348 		 * first, some sanity checks on the old entry
3349 		 */
3350 		if (UVM_ET_ISSUBMAP(old_entry))
3351 		    panic("fork: encountered a submap during fork (illegal)");
3352 
3353 		if (!UVM_ET_ISCOPYONWRITE(old_entry) &&
3354 			    UVM_ET_ISNEEDSCOPY(old_entry))
3355 	panic("fork: non-copy_on_write map entry marked needs_copy (illegal)");
3356 
3357 
3358 		switch (old_entry->inheritance) {
3359 		case MAP_INHERIT_NONE:
3360 			/*
3361 			 * drop the mapping
3362 			 */
3363 			break;
3364 
3365 		case MAP_INHERIT_SHARE:
3366 			/*
3367 			 * share the mapping: this means we want the old and
3368 			 * new entries to share amaps and backing objects.
3369 			 */
3370 
3371 			/*
3372 			 * if the old_entry needs a new amap (due to prev fork)
3373 			 * then we need to allocate it now so that we have
3374 			 * something we own to share with the new_entry.   [in
3375 			 * other words, we need to clear needs_copy]
3376 			 */
3377 
3378 			if (UVM_ET_ISNEEDSCOPY(old_entry)) {
3379 				/* get our own amap, clears needs_copy */
3380 				amap_copy(old_map, old_entry, M_WAITOK, FALSE,
3381 				    0, 0);
3382 				/* XXXCDC: WAITOK??? */
3383 			}
3384 
3385 			new_entry = uvm_mapent_alloc(new_map);
3386 			/* old_entry -> new_entry */
3387 			uvm_mapent_copy(old_entry, new_entry);
3388 
3389 			/* new pmap has nothing wired in it */
3390 			new_entry->wired_count = 0;
3391 
3392 			/*
3393 			 * gain reference to object backing the map (can't
3394 			 * be a submap, already checked this case).
3395 			 */
3396 			if (new_entry->aref.ar_amap)
3397 				/* share reference */
3398 				uvm_map_reference_amap(new_entry, AMAP_SHARED);
3399 
3400 			if (new_entry->object.uvm_obj &&
3401 			    new_entry->object.uvm_obj->pgops->pgo_reference)
3402 				new_entry->object.uvm_obj->
3403 				    pgops->pgo_reference(
3404 				        new_entry->object.uvm_obj);
3405 
3406 			/* insert entry at end of new_map's entry list */
3407 			uvm_map_entry_link(new_map, new_map->header.prev,
3408 			    new_entry);
3409 
3410 			/*
3411 			 * pmap_copy the mappings: this routine is optional
3412 			 * but if it is there it will reduce the number of
3413 			 * page faults in the new proc.
3414 			 */
3415 
3416 			pmap_copy(new_pmap, old_map->pmap, new_entry->start,
3417 			    (old_entry->end - old_entry->start),
3418 			    old_entry->start);
3419 
3420 			break;
3421 
3422 		case MAP_INHERIT_COPY:
3423 
3424 			/*
3425 			 * copy-on-write the mapping (using mmap's
3426 			 * MAP_PRIVATE semantics)
3427 			 *
3428 			 * allocate new_entry, adjust reference counts.
3429 			 * (note that new references are read-only).
3430 			 */
3431 
3432 			new_entry = uvm_mapent_alloc(new_map);
3433 			/* old_entry -> new_entry */
3434 			uvm_mapent_copy(old_entry, new_entry);
3435 
3436 			if (new_entry->aref.ar_amap)
3437 				uvm_map_reference_amap(new_entry, 0);
3438 
3439 			if (new_entry->object.uvm_obj &&
3440 			    new_entry->object.uvm_obj->pgops->pgo_reference)
3441 				new_entry->object.uvm_obj->pgops->pgo_reference
3442 				    (new_entry->object.uvm_obj);
3443 
3444 			/* new pmap has nothing wired in it */
3445 			new_entry->wired_count = 0;
3446 
3447 			new_entry->etype |=
3448 			    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
3449 			uvm_map_entry_link(new_map, new_map->header.prev,
3450 			    new_entry);
3451 
3452 			/*
3453 			 * the new entry will need an amap.  it will either
3454 			 * need to be copied from the old entry or created
3455 			 * from scratch (if the old entry does not have an
3456 			 * amap).  can we defer this process until later
3457 			 * (by setting "needs_copy") or do we need to copy
3458 			 * the amap now?
3459 			 *
3460 			 * we must copy the amap now if any of the following
3461 			 * conditions hold:
3462 			 * 1. the old entry has an amap and that amap is
3463 			 *    being shared.  this means that the old (parent)
3464 			 *    process is sharing the amap with another
3465 			 *    process.  if we do not clear needs_copy here
3466 			 *    we will end up in a situation where both the
3467 			 *    parent and child process are referring to the
3468 			 *    same amap with "needs_copy" set.  if the
3469 			 *    parent write-faults, the fault routine will
3470 			 *    clear "needs_copy" in the parent by allocating
3471 			 *    a new amap.   this is wrong because the
3472 			 *    parent is supposed to be sharing the old amap
3473 			 *    and the new amap will break that.
3474 			 *
3475 			 * 2. if the old entry has an amap and a non-zero
3476 			 *    wire count then we are going to have to call
3477 			 *    amap_cow_now to avoid page faults in the
3478 			 *    parent process.   since amap_cow_now requires
3479 			 *    "needs_copy" to be clear we might as well
3480 			 *    clear it here as well.
3481 			 *
3482 			 */
3483 
3484 			if (old_entry->aref.ar_amap != NULL) {
3485 
3486 			  if ((amap_flags(old_entry->aref.ar_amap) &
3487 			       AMAP_SHARED) != 0 ||
3488 			      VM_MAPENT_ISWIRED(old_entry)) {
3489 
3490 			    amap_copy(new_map, new_entry, M_WAITOK, FALSE,
3491 				      0, 0);
3492 			    /* XXXCDC: M_WAITOK ... ok? */
3493 			  }
3494 			}
3495 
3496 			/*
3497 			 * if the parent's entry is wired down, then the
3498 			 * parent process does not want page faults on
3499 			 * access to that memory.  this means that we
3500 			 * cannot do copy-on-write because we can't write
3501 			 * protect the old entry.   in this case we
3502 			 * resolve all copy-on-write faults now, using
3503 			 * amap_cow_now.   note that we have already
3504 			 * allocated any needed amap (above).
3505 			 */
3506 
3507 			if (VM_MAPENT_ISWIRED(old_entry)) {
3508 
3509 			  /*
3510 			   * resolve all copy-on-write faults now
3511 			   * (note that there is nothing to do if
3512 			   * the old mapping does not have an amap).
3513 			   * XXX: is it worthwhile to bother with pmap_copy
3514 			   * in this case?
3515 			   */
3516 			  if (old_entry->aref.ar_amap)
3517 			    amap_cow_now(new_map, new_entry);
3518 
3519 			} else {
3520 
3521 			  /*
3522 			   * setup mappings to trigger copy-on-write faults
3523 			   * we must write-protect the parent if it has
3524 			   * an amap and it is not already "needs_copy"...
3525 			   * if it is already "needs_copy" then the parent
3526 			   * has already been write-protected by a previous
3527 			   * fork operation.
3528 			   *
3529 			   * if we do not write-protect the parent, then
3530 			   * we must be sure to write-protect the child
3531 			   * after the pmap_copy() operation.
3532 			   *
3533 			   * XXX: pmap_copy should have some way of telling
3534 			   * us that it didn't do anything so we can avoid
3535 			   * calling pmap_protect needlessly.
3536 			   */
3537 
3538 			  if (old_entry->aref.ar_amap) {
3539 
3540 			    if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
3541 			      if (old_entry->max_protection & VM_PROT_WRITE) {
3542 				pmap_protect(old_map->pmap,
3543 					     old_entry->start,
3544 					     old_entry->end,
3545 					     old_entry->protection &
3546 					     ~VM_PROT_WRITE);
3547 			      }
3548 			      old_entry->etype |= UVM_ET_NEEDSCOPY;
3549 			    }
3550 
3551 			    /*
3552 			     * parent must now be write-protected
3553 			     */
3554 			    protect_child = FALSE;
3555 			  } else {
3556 
3557 			    /*
3558 			     * we only need to protect the child if the
3559 			     * parent has write access.
3560 			     */
3561 			    if (old_entry->max_protection & VM_PROT_WRITE)
3562 			      protect_child = TRUE;
3563 			    else
3564 			      protect_child = FALSE;
3565 
3566 			  }
3567 
3568 			  /*
3569 			   * copy the mappings
3570 			   * XXX: need a way to tell if this does anything
3571 			   */
3572 
3573 			  pmap_copy(new_pmap, old_map->pmap,
3574 				    new_entry->start,
3575 				    (old_entry->end - old_entry->start),
3576 				    old_entry->start);
3577 
3578 			  /*
3579 			   * protect the child's mappings if necessary
3580 			   */
3581 			  if (protect_child) {
3582 			    pmap_protect(new_pmap, new_entry->start,
3583 					 new_entry->end,
3584 					 new_entry->protection &
3585 					          ~VM_PROT_WRITE);
3586 			  }
3587 
3588 			}
3589 			break;
3590 		}  /* end of switch statement */
3591 		old_entry = old_entry->next;
3592 	}
3593 
3594 	new_map->size = old_map->size;
3595 	vm_map_unlock(old_map);
3596 
3597 #ifdef SYSVSHM
3598 	if (vm1->vm_shm)
3599 		shmfork(vm1, vm2);
3600 #endif
3601 
3602 #ifdef PMAP_FORK
3603 	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
3604 #endif
3605 
3606 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
3607 	return(vm2);
3608 }
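
/*
 * summary (editor's sketch, not part of the original source): the
 * MAP_INHERIT_COPY case above reduces to this decision procedure for
 * whether the amap must be copied at fork time or can be deferred:
 *
 *	boolean_t
 *	must_copy_amap_now(vm_map_entry_t old_entry)
 *	{
 *		if (old_entry->aref.ar_amap == NULL)
 *			return (FALSE);		(nothing to copy yet)
 *		if (amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED)
 *			return (TRUE);		(condition 1 above)
 *		if (VM_MAPENT_ISWIRED(old_entry))
 *			return (TRUE);		(condition 2, amap_cow_now)
 *		return (FALSE);			(defer via needs_copy)
 *	}
 *
 * when the copy is deferred, the parent side is write-protected (or
 * already was, if an earlier fork set needs_copy) so that the first
 * write fault performs the real amap copy.
 */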
3609 
3610 #if defined(DDB)
3611 
3612 /*
3613  * DDB hooks
3614  */
3615 
3616 /*
3617  * uvm_map_printit: actually prints the map
3618  */
3619 
3620 void
3621 uvm_map_printit(map, full, pr)
3622 	vm_map_t map;
3623 	boolean_t full;
3624 	int (*pr)(const char *, ...);
3625 {
3626 	vm_map_entry_t entry;
3627 
3628 	(*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset, map->max_offset);
3629 	(*pr)("\t#ent=%d, sz=%u, ref=%d, version=%u, flags=0x%x\n",
3630 	    map->nentries, map->size, map->ref_count, map->timestamp,
3631 	    map->flags);
3632 #ifdef pmap_resident_count
3633 	(*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
3634 	    pmap_resident_count(map->pmap));
3635 #else
3636 	/* XXXCDC: this should be required ... */
3637 	(*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
3638 #endif
3639 	if (!full)
3640 		return;
3641 	for (entry = map->header.next; entry != &map->header;
3642 	    entry = entry->next) {
3643 		(*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
3644 		    entry, entry->start, entry->end, entry->object.uvm_obj,
3645 		    (long long)entry->offset, entry->aref.ar_amap,
3646 		    entry->aref.ar_pageoff);
3647 		(*pr)(
3648 		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
3649 		    "wc=%d, adv=%d\n",
3650 		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
3651 		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
3652 		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
3653 		    entry->protection, entry->max_protection,
3654 		    entry->inheritance, entry->wired_count, entry->advice);
3655 	}
3656 }
3657 
3658 /*
3659  * uvm_object_printit: actually prints the object
3660  */
3661 
3662 void
3663 uvm_object_printit(uobj, full, pr)
3664 	struct uvm_object *uobj;
3665 	boolean_t full;
3666 	int (*pr)(const char *, ...);
3667 {
3668 	struct vm_page *pg;
3669 	int cnt = 0;
3670 
3671 	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
3672 	    uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
3673 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
3674 		(*pr)("refs=<SYSTEM>\n");
3675 	else
3676 		(*pr)("refs=%d\n", uobj->uo_refs);
3677 
3678 	if (!full) {
3679 		return;
3680 	}
3681 	(*pr)("  PAGES <pg,offset>:\n  ");
3682 	for (pg = TAILQ_FIRST(&uobj->memq);
3683 	     pg != NULL;
3684 	     pg = TAILQ_NEXT(pg, listq), cnt++) {
3685 		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
3686 		if ((cnt % 3) == 2) {
3687 			(*pr)("\n  ");
3688 		}
3689 	}
3690 	if ((cnt % 3) != 2) {
3691 		(*pr)("\n");
3692 	}
3693 }
3694 
3695 /*
3696  * uvm_page_printit: actually print the page
3697  */
3698 
3699 static const char page_flagbits[] =
3700 	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5CLEANCHK\6RELEASED\7FAKE\10RDONLY"
3701 	"\11ZERO\15PAGER1";
3702 static const char page_pqflagbits[] =
3703 	"\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";
3704 
3705 void
3706 uvm_page_printit(pg, full, pr)
3707 	struct vm_page *pg;
3708 	boolean_t full;
3709 	int (*pr)(const char *, ...);
3710 {
3711 	struct vm_page *tpg;
3712 	struct uvm_object *uobj;
3713 	struct pglist *pgl;
3714 	char pgbuf[128];
3715 	char pqbuf[128];
3716 
3717 	(*pr)("PAGE %p:\n", pg);
3718 	snprintf(pgbuf, sizeof(pgbuf), "%b", pg->flags, page_flagbits);
3719 	snprintf(pqbuf, sizeof(pqbuf), "%b", pg->pqflags, page_pqflagbits);
3720 	(*pr)("  flags=%s, pqflags=%s, vers=%d, wire_count=%d, pa=0x%lx\n",
3721 	    pgbuf, pqbuf, pg->version, pg->wire_count, (long)pg->phys_addr);
3722 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
3723 	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
3724 #if defined(UVM_PAGE_TRKOWN)
3725 	if (pg->flags & PG_BUSY)
3726 		(*pr)("  owning process = %d, tag=%s\n",
3727 		    pg->owner, pg->owner_tag);
3728 	else
3729 		(*pr)("  page not busy, no owner\n");
3730 #else
3731 	(*pr)("  [page ownership tracking disabled]\n");
3732 #endif
3733 
3734 	if (!full)
3735 		return;
3736 
3737 	/* cross-verify object/anon */
3738 	if ((pg->pqflags & PQ_FREE) == 0) {
3739 		if (pg->pqflags & PQ_ANON) {
3740 			if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
3741 			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
3742 				(pg->uanon) ? pg->uanon->u.an_page : NULL);
3743 			else
3744 				(*pr)("  anon backpointer is OK\n");
3745 		} else {
3746 			uobj = pg->uobject;
3747 			if (uobj) {
3748 				(*pr)("  checking object list\n");
3749 				TAILQ_FOREACH(tpg, &uobj->memq, listq) {
3750 					if (tpg == pg) {
3751 						break;
3752 					}
3753 				}
3754 				if (tpg)
3755 					(*pr)("  page found on object list\n");
3756 				else
3757 			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
3758 			}
3759 		}
3760 	}
3761 
3762 	/* cross-verify page queue */
3763 	if (pg->pqflags & PQ_FREE) {
3764 		int fl = uvm_page_lookup_freelist(pg);
3765 		pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
3766 		    PGFL_ZEROS : PGFL_UNKNOWN];
3767 	} else if (pg->pqflags & PQ_INACTIVE) {
3768 		pgl = (pg->pqflags & PQ_SWAPBACKED) ?
3769 		    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
3770 	} else if (pg->pqflags & PQ_ACTIVE) {
3771 		pgl = &uvm.page_active;
3772 	} else {
3773 		pgl = NULL;
3774 	}
3775 
3776 	if (pgl) {
3777 		(*pr)("  checking pageq list\n");
3778 		TAILQ_FOREACH(tpg, pgl, pageq) {
3779 			if (tpg == pg) {
3780 				break;
3781 			}
3782 		}
3783 		if (tpg)
3784 			(*pr)("  page found on pageq list\n");
3785 		else
3786 			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
3787 	}
3788 }
3789 #endif
3790