xref: /csrg-svn/sys/vm/vm_map.c (revision 68938)
145748Smckusick /*
263379Sbostic  * Copyright (c) 1991, 1993
363379Sbostic  *	The Regents of the University of California.  All rights reserved.
445748Smckusick  *
545748Smckusick  * This code is derived from software contributed to Berkeley by
645748Smckusick  * The Mach Operating System project at Carnegie-Mellon University.
745748Smckusick  *
848493Smckusick  * %sccs.include.redist.c%
945748Smckusick  *
10*68938Smckusick  *	@(#)vm_map.c	8.6 (Berkeley) 04/27/95
1148493Smckusick  *
1248493Smckusick  *
1348493Smckusick  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
1448493Smckusick  * All rights reserved.
1548493Smckusick  *
1648493Smckusick  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
1748493Smckusick  *
1848493Smckusick  * Permission to use, copy, modify and distribute this software and
1948493Smckusick  * its documentation is hereby granted, provided that both the copyright
2048493Smckusick  * notice and this permission notice appear in all copies of the
2148493Smckusick  * software, derivative works or modified versions, and any portions
2248493Smckusick  * thereof, and that both notices appear in supporting documentation.
2348493Smckusick  *
2448493Smckusick  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
2548493Smckusick  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
2648493Smckusick  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
2748493Smckusick  *
2848493Smckusick  * Carnegie Mellon requests users of this software to return to
2948493Smckusick  *
3048493Smckusick  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
3148493Smckusick  *  School of Computer Science
3248493Smckusick  *  Carnegie Mellon University
3348493Smckusick  *  Pittsburgh PA 15213-3890
3448493Smckusick  *
3548493Smckusick  * any improvements or extensions that they make and grant Carnegie the
3648493Smckusick  * rights to redistribute these changes.
3745748Smckusick  */
3845748Smckusick 
3945748Smckusick /*
4045748Smckusick  *	Virtual memory mapping module.
4145748Smckusick  */
4245748Smckusick 
4353357Sbostic #include <sys/param.h>
4453357Sbostic #include <sys/systm.h>
4553357Sbostic #include <sys/malloc.h>
4645748Smckusick 
4753357Sbostic #include <vm/vm.h>
4853357Sbostic #include <vm/vm_page.h>
4953357Sbostic 
5045748Smckusick /*
5145748Smckusick  *	Virtual memory maps provide for the mapping, protection,
5245748Smckusick  *	and sharing of virtual memory objects.  In addition,
5345748Smckusick  *	this module provides for an efficient virtual copy of
5445748Smckusick  *	memory from one map to another.
5545748Smckusick  *
5645748Smckusick  *	Synchronization is required prior to most operations.
5745748Smckusick  *
5845748Smckusick  *	Maps consist of an ordered doubly-linked list of simple
5945748Smckusick  *	entries; a single hint is used to speed up lookups.
6045748Smckusick  *
6145748Smckusick  *	In order to properly represent the sharing of virtual
6245748Smckusick  *	memory regions among maps, the map structure is bi-level.
6345748Smckusick  *	Top-level ("address") maps refer to regions of sharable
6445748Smckusick  *	virtual memory.  These regions are implemented as
6545748Smckusick  *	("sharing") maps, which then refer to the actual virtual
6645748Smckusick  *	memory objects.  When two address maps "share" memory,
6745748Smckusick  *	their top-level maps both have references to the same
6845748Smckusick  *	sharing map.  When memory is virtual-copied from one
6945748Smckusick  *	address map to another, the references in the sharing
7045748Smckusick  *	maps are actually copied -- no copying occurs at the
7145748Smckusick  *	virtual memory object level.
7245748Smckusick  *
7345748Smckusick  *	Since portions of maps are specified by start/end addresses,
7445748Smckusick  *	which may not align with existing map entries, all
7545748Smckusick  *	routines merely "clip" entries to these start/end values.
7645748Smckusick  *	[That is, an entry is split into two, bordering at a
7745748Smckusick  *	start or end value.]  Note that these clippings may not
7845748Smckusick  *	always be necessary (as the two resulting entries are then
7945748Smckusick  *	not changed); however, the clipping is done for convenience.
8045748Smckusick  *	No attempt is currently made to "glue back together" two
8145748Smckusick  *	abutting entries.
8245748Smckusick  *
8345748Smckusick  *	As mentioned above, virtual copy operations are performed
8445748Smckusick  *	by copying VM object references from one sharing map to
8545748Smckusick  *	another, and then marking both regions as copy-on-write.
8645748Smckusick  *	It is important to note that only one writeable reference
8745748Smckusick  *	to a VM object region exists in any map -- this means that
8845748Smckusick  *	shadow object creation can be delayed until a write operation
8945748Smckusick  *	occurs.
9045748Smckusick  */
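
/*
 *	Illustrative example (not part of the original text): clipping the
 *	single entry [0, 3*PAGE_SIZE) at the address PAGE_SIZE leaves two
 *	adjacent entries, [0, PAGE_SIZE) and [PAGE_SIZE, 3*PAGE_SIZE), both
 *	referring to the same backing object with their offsets adjusted so
 *	that the mapping itself is unchanged.
 */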
9145748Smckusick 
9245748Smckusick /*
9348383Skarels  *	vm_map_startup:
9445748Smckusick  *
9545748Smckusick  *	Initialize the vm_map module.  Must be called before
9645748Smckusick  *	any other vm_map routines.
9745748Smckusick  *
9845748Smckusick  *	Map and entry structures are allocated from the general
9945748Smckusick  *	purpose memory pool with some exceptions:
10045748Smckusick  *
10145748Smckusick  *	- The kernel map and kmem submap are allocated statically.
10245748Smckusick  *	- Kernel map entries are allocated out of a static pool.
10345748Smckusick  *
10445748Smckusick  *	These restrictions are necessary since malloc() uses the
10545748Smckusick  *	maps and requires map entries.
10645748Smckusick  */
10745748Smckusick 
10845748Smckusick vm_offset_t	kentry_data;
10945748Smckusick vm_size_t	kentry_data_size;
11045748Smckusick vm_map_entry_t	kentry_free;
11145748Smckusick vm_map_t	kmap_free;
11245748Smckusick 
11353357Sbostic static void	_vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
11453357Sbostic static void	_vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
11553357Sbostic 
11668162Scgd void
11768162Scgd vm_map_startup()
11845748Smckusick {
11945748Smckusick 	register int i;
12045748Smckusick 	register vm_map_entry_t mep;
12145748Smckusick 	vm_map_t mp;
12245748Smckusick 
12345748Smckusick 	/*
12445748Smckusick 	 * Static map structures for allocation before initialization of
12545748Smckusick 	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
12645748Smckusick 	 */
12745748Smckusick 	kmap_free = mp = (vm_map_t) kentry_data;
12845748Smckusick 	i = MAX_KMAP;
12945748Smckusick 	while (--i > 0) {
13045748Smckusick 		mp->header.next = (vm_map_entry_t) (mp + 1);
13145748Smckusick 		mp++;
13245748Smckusick 	}
13348383Skarels 	mp++->header.next = NULL;
13445748Smckusick 
13545748Smckusick 	/*
13645748Smckusick 	 * Form a free list of statically allocated kernel map entries
13745748Smckusick 	 * with the rest.
13845748Smckusick 	 */
13945748Smckusick 	kentry_free = mep = (vm_map_entry_t) mp;
14045748Smckusick 	i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
14145748Smckusick 	while (--i > 0) {
14245748Smckusick 		mep->next = mep + 1;
14345748Smckusick 		mep++;
14445748Smckusick 	}
14548383Skarels 	mep->next = NULL;
14645748Smckusick }
14745748Smckusick 
14845748Smckusick /*
14948383Skarels  * Allocate a vmspace structure, including a vm_map and pmap,
15048383Skarels  * and initialize those structures.  The refcnt is set to 1.
15148383Skarels  * The remaining fields must be initialized by the caller.
15248383Skarels  */
15348383Skarels struct vmspace *
15448383Skarels vmspace_alloc(min, max, pageable)
15548383Skarels 	vm_offset_t min, max;
15648383Skarels 	int pageable;
15748383Skarels {
15848383Skarels 	register struct vmspace *vm;
15948383Skarels 
16048383Skarels 	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
16148383Skarels 	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
16248383Skarels 	vm_map_init(&vm->vm_map, min, max, pageable);
16348383Skarels 	pmap_pinit(&vm->vm_pmap);
16448383Skarels 	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
16548383Skarels 	vm->vm_refcnt = 1;
16648383Skarels 	return (vm);
16748383Skarels }
16848383Skarels 
16948383Skarels void
17048383Skarels vmspace_free(vm)
17148383Skarels 	register struct vmspace *vm;
17248383Skarels {
17348383Skarels 
17448383Skarels 	if (--vm->vm_refcnt == 0) {
17548383Skarels 		/*
17648383Skarels 		 * Lock the map, to wait out all other references to it.
17748383Skarels 		 * Delete all of the mappings and pages they hold,
17848383Skarels 		 * then call the pmap module to reclaim anything left.
17948383Skarels 		 */
18048383Skarels 		vm_map_lock(&vm->vm_map);
18148383Skarels 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
18248383Skarels 		    vm->vm_map.max_offset);
18348383Skarels 		pmap_release(&vm->vm_pmap);
18448383Skarels 		FREE(vm, M_VMMAP);
18548383Skarels 	}
18648383Skarels }
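
/*
 * Illustrative use (hypothetical, not part of the original source): a
 * process-creation path might obtain and release a pageable vmspace
 * roughly as follows; VM_MIN_ADDRESS and VM_MAXUSER_ADDRESS come from
 * the machine-dependent <machine/vmparam.h>.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 1);
 *	(initialize the statistics fields following vm_startcopy)
 *	vmspace_free(vm);
 */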
18748383Skarels 
18848383Skarels /*
18945748Smckusick  *	vm_map_create:
19045748Smckusick  *
19145748Smckusick  *	Creates and returns a new empty VM map with
19245748Smckusick  *	the given physical map structure, and having
19345748Smckusick  *	the given lower and upper address bounds.
19445748Smckusick  */
19568162Scgd vm_map_t
19668162Scgd vm_map_create(pmap, min, max, pageable)
19745748Smckusick 	pmap_t		pmap;
19845748Smckusick 	vm_offset_t	min, max;
19945748Smckusick 	boolean_t	pageable;
20045748Smckusick {
20145748Smckusick 	register vm_map_t	result;
20265479Sbostic 	extern vm_map_t		kmem_map;
20345748Smckusick 
20448383Skarels 	if (kmem_map == NULL) {
20545748Smckusick 		result = kmap_free;
20648383Skarels 		if (result == NULL)
20748383Skarels 			panic("vm_map_create: out of maps");
20868162Scgd 		kmap_free = (vm_map_t) result->header.next;
20945748Smckusick 	} else
21045748Smckusick 		MALLOC(result, vm_map_t, sizeof(struct vm_map),
21145748Smckusick 		       M_VMMAP, M_WAITOK);
21245748Smckusick 
21348383Skarels 	vm_map_init(result, min, max, pageable);
21445748Smckusick 	result->pmap = pmap;
21545748Smckusick 	return(result);
21645748Smckusick }
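
/*
 * Illustrative use (hypothetical, not part of the original source): a
 * kmem_suballoc()-style routine creates a new map that shares the kernel
 * pmap and covers a range it has already reserved in the parent map:
 *
 *	vm_map_t submap;
 *
 *	submap = vm_map_create(vm_map_pmap(kernel_map), min, max, TRUE);
 */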
21745748Smckusick 
21845748Smckusick /*
21948383Skarels  * Initialize an existing vm_map structure
22048383Skarels  * such as that in the vmspace structure.
22148383Skarels  * The pmap is set elsewhere.
22248383Skarels  */
22348383Skarels void
22448383Skarels vm_map_init(map, min, max, pageable)
22548383Skarels 	register struct vm_map *map;
22648383Skarels 	vm_offset_t	min, max;
22748383Skarels 	boolean_t	pageable;
22848383Skarels {
22948383Skarels 	map->header.next = map->header.prev = &map->header;
23048383Skarels 	map->nentries = 0;
23148383Skarels 	map->size = 0;
23248383Skarels 	map->ref_count = 1;
23348383Skarels 	map->is_main_map = TRUE;
23448383Skarels 	map->min_offset = min;
23548383Skarels 	map->max_offset = max;
23648383Skarels 	map->entries_pageable = pageable;
23748383Skarels 	map->first_free = &map->header;
23848383Skarels 	map->hint = &map->header;
23948383Skarels 	map->timestamp = 0;
24068795Smckusick 	lock_init(&map->lock, PVM, "thrd_sleep", 0, 0);
24148383Skarels 	simple_lock_init(&map->ref_lock);
24248383Skarels 	simple_lock_init(&map->hint_lock);
24348383Skarels }
24448383Skarels 
24548383Skarels /*
24645748Smckusick  *	vm_map_entry_create:	[ internal use only ]
24745748Smckusick  *
24845748Smckusick  *	Allocates a VM map entry for insertion.
24945748Smckusick  *	No entry fields are filled in.
25045748Smckusick  */
25168162Scgd vm_map_entry_t
25268162Scgd vm_map_entry_create(map)
25345748Smckusick 	vm_map_t	map;
25445748Smckusick {
25545748Smckusick 	vm_map_entry_t	entry;
25665686Shibler #ifdef DEBUG
25758374Smckusick 	extern vm_map_t		kernel_map, kmem_map, mb_map, pager_map;
25865686Shibler 	boolean_t		isspecial;
25945748Smckusick 
26065686Shibler 	isspecial = (map == kernel_map || map == kmem_map ||
26165686Shibler 		     map == mb_map || map == pager_map);
26265686Shibler 	if (isspecial && map->entries_pageable ||
26365686Shibler 	    !isspecial && !map->entries_pageable)
26465686Shibler 		panic("vm_map_entry_create: bogus map");
26565686Shibler #endif
26665686Shibler 	if (map->entries_pageable) {
26765686Shibler 		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
26865686Shibler 		       M_VMMAPENT, M_WAITOK);
26965686Shibler 	} else {
27045748Smckusick 		if (entry = kentry_free)
27145748Smckusick 			kentry_free = kentry_free->next;
27265686Shibler 	}
27348383Skarels 	if (entry == NULL)
27445748Smckusick 		panic("vm_map_entry_create: out of map entries");
27545748Smckusick 
27645748Smckusick 	return(entry);
27745748Smckusick }
27845748Smckusick 
27945748Smckusick /*
28045748Smckusick  *	vm_map_entry_dispose:	[ internal use only ]
28145748Smckusick  *
28245748Smckusick  *	Inverse of vm_map_entry_create.
28345748Smckusick  */
28468162Scgd void
28568162Scgd vm_map_entry_dispose(map, entry)
28645748Smckusick 	vm_map_t	map;
28745748Smckusick 	vm_map_entry_t	entry;
28845748Smckusick {
28965686Shibler #ifdef DEBUG
29058374Smckusick 	extern vm_map_t		kernel_map, kmem_map, mb_map, pager_map;
29165686Shibler 	boolean_t		isspecial;
29245748Smckusick 
29365686Shibler 	isspecial = (map == kernel_map || map == kmem_map ||
29465686Shibler 		     map == mb_map || map == pager_map);
29565686Shibler 	if (isspecial && map->entries_pageable ||
29665686Shibler 	    !isspecial && !map->entries_pageable)
29765686Shibler 		panic("vm_map_entry_dispose: bogus map");
29865686Shibler #endif
29965686Shibler 	if (map->entries_pageable) {
30065686Shibler 		FREE(entry, M_VMMAPENT);
30165686Shibler 	} else {
30245748Smckusick 		entry->next = kentry_free;
30345748Smckusick 		kentry_free = entry;
30465686Shibler 	}
30545748Smckusick }
30645748Smckusick 
30745748Smckusick /*
30845748Smckusick  *	vm_map_entry_{un,}link:
30945748Smckusick  *
31045748Smckusick  *	Insert/remove entries from maps.
31145748Smckusick  */
31245748Smckusick #define	vm_map_entry_link(map, after_where, entry) \
31345748Smckusick 		{ \
31445748Smckusick 		(map)->nentries++; \
31545748Smckusick 		(entry)->prev = (after_where); \
31645748Smckusick 		(entry)->next = (after_where)->next; \
31745748Smckusick 		(entry)->prev->next = (entry); \
31845748Smckusick 		(entry)->next->prev = (entry); \
31945748Smckusick 		}
32045748Smckusick #define	vm_map_entry_unlink(map, entry) \
32145748Smckusick 		{ \
32245748Smckusick 		(map)->nentries--; \
32345748Smckusick 		(entry)->next->prev = (entry)->prev; \
32445748Smckusick 		(entry)->prev->next = (entry)->next; \
32545748Smckusick 		}
32645748Smckusick 
32745748Smckusick /*
32845748Smckusick  *	vm_map_reference:
32945748Smckusick  *
33045748Smckusick  *	Creates another valid reference to the given map.
33145748Smckusick  *
33245748Smckusick  */
33368162Scgd void
33468162Scgd vm_map_reference(map)
33545748Smckusick 	register vm_map_t	map;
33645748Smckusick {
33748383Skarels 	if (map == NULL)
33845748Smckusick 		return;
33945748Smckusick 
34045748Smckusick 	simple_lock(&map->ref_lock);
34145748Smckusick 	map->ref_count++;
34245748Smckusick 	simple_unlock(&map->ref_lock);
34345748Smckusick }
34445748Smckusick 
34545748Smckusick /*
34645748Smckusick  *	vm_map_deallocate:
34745748Smckusick  *
34845748Smckusick  *	Removes a reference from the specified map,
34945748Smckusick  *	destroying it if no references remain.
35045748Smckusick  *	The map should not be locked.
35145748Smckusick  */
35268162Scgd void
35368162Scgd vm_map_deallocate(map)
35445748Smckusick 	register vm_map_t	map;
35545748Smckusick {
35645748Smckusick 	register int		c;
35745748Smckusick 
35848383Skarels 	if (map == NULL)
35945748Smckusick 		return;
36045748Smckusick 
36145748Smckusick 	simple_lock(&map->ref_lock);
36245748Smckusick 	c = --map->ref_count;
36345748Smckusick 	simple_unlock(&map->ref_lock);
36445748Smckusick 
36545748Smckusick 	if (c > 0) {
36645748Smckusick 		return;
36745748Smckusick 	}
36845748Smckusick 
36945748Smckusick 	/*
37045748Smckusick 	 *	Lock the map, to wait out all other references
37145748Smckusick 	 *	to it.
37245748Smckusick 	 */
37345748Smckusick 
37445748Smckusick 	vm_map_lock(map);
37545748Smckusick 
37645748Smckusick 	(void) vm_map_delete(map, map->min_offset, map->max_offset);
37745748Smckusick 
37845748Smckusick 	pmap_destroy(map->pmap);
37945748Smckusick 
38045748Smckusick 	FREE(map, M_VMMAP);
38145748Smckusick }
38245748Smckusick 
38345748Smckusick /*
38465686Shibler  *	vm_map_insert:
38545748Smckusick  *
38645748Smckusick  *	Inserts the given whole VM object into the target
38745748Smckusick  *	map at the specified address range.  The object's
38845748Smckusick  *	size should match that of the address range.
38945748Smckusick  *
39045748Smckusick  *	Requires that the map be locked, and leaves it so.
39145748Smckusick  */
39253357Sbostic int
39345748Smckusick vm_map_insert(map, object, offset, start, end)
39445748Smckusick 	vm_map_t	map;
39545748Smckusick 	vm_object_t	object;
39645748Smckusick 	vm_offset_t	offset;
39745748Smckusick 	vm_offset_t	start;
39845748Smckusick 	vm_offset_t	end;
39945748Smckusick {
40045748Smckusick 	register vm_map_entry_t		new_entry;
40145748Smckusick 	register vm_map_entry_t		prev_entry;
40245748Smckusick 	vm_map_entry_t			temp_entry;
40345748Smckusick 
40445748Smckusick 	/*
40545748Smckusick 	 *	Check that the start and end points are not bogus.
40645748Smckusick 	 */
40745748Smckusick 
40845748Smckusick 	if ((start < map->min_offset) || (end > map->max_offset) ||
40945748Smckusick 			(start >= end))
41045748Smckusick 		return(KERN_INVALID_ADDRESS);
41145748Smckusick 
41245748Smckusick 	/*
41345748Smckusick 	 *	Find the entry prior to the proposed
41445748Smckusick 	 *	starting address; if it's part of an
41545748Smckusick 	 *	existing entry, this range is bogus.
41645748Smckusick 	 */
41745748Smckusick 
41845748Smckusick 	if (vm_map_lookup_entry(map, start, &temp_entry))
41945748Smckusick 		return(KERN_NO_SPACE);
42045748Smckusick 
42145748Smckusick 	prev_entry = temp_entry;
42245748Smckusick 
42345748Smckusick 	/*
42445748Smckusick 	 *	Assert that the next entry doesn't overlap the
42545748Smckusick 	 *	end point.
42645748Smckusick 	 */
42745748Smckusick 
42845748Smckusick 	if ((prev_entry->next != &map->header) &&
42945748Smckusick 			(prev_entry->next->start < end))
43045748Smckusick 		return(KERN_NO_SPACE);
43145748Smckusick 
43245748Smckusick 	/*
43345748Smckusick 	 *	See if we can avoid creating a new entry by
43445748Smckusick 	 *	extending one of our neighbors.
43545748Smckusick 	 */
43645748Smckusick 
43748383Skarels 	if (object == NULL) {
43845748Smckusick 		if ((prev_entry != &map->header) &&
43945748Smckusick 		    (prev_entry->end == start) &&
44045748Smckusick 		    (map->is_main_map) &&
44145748Smckusick 		    (prev_entry->is_a_map == FALSE) &&
44245748Smckusick 		    (prev_entry->is_sub_map == FALSE) &&
44345748Smckusick 		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
44445748Smckusick 		    (prev_entry->protection == VM_PROT_DEFAULT) &&
44545748Smckusick 		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
44645748Smckusick 		    (prev_entry->wired_count == 0)) {
44745748Smckusick 
44845748Smckusick 			if (vm_object_coalesce(prev_entry->object.vm_object,
44948383Skarels 					NULL,
45045748Smckusick 					prev_entry->offset,
45145748Smckusick 					(vm_offset_t) 0,
45245748Smckusick 					(vm_size_t)(prev_entry->end
45345748Smckusick 						     - prev_entry->start),
45445748Smckusick 					(vm_size_t)(end - prev_entry->end))) {
45545748Smckusick 				/*
45645748Smckusick 				 *	Coalesced the two objects - can extend
45745748Smckusick 				 *	the previous map entry to include the
45845748Smckusick 				 *	new range.
45945748Smckusick 				 */
46045748Smckusick 				map->size += (end - prev_entry->end);
46145748Smckusick 				prev_entry->end = end;
46245748Smckusick 				return(KERN_SUCCESS);
46345748Smckusick 			}
46445748Smckusick 		}
46545748Smckusick 	}
46645748Smckusick 
46745748Smckusick 	/*
46845748Smckusick 	 *	Create a new entry
46945748Smckusick 	 */
47045748Smckusick 
47145748Smckusick 	new_entry = vm_map_entry_create(map);
47245748Smckusick 	new_entry->start = start;
47345748Smckusick 	new_entry->end = end;
47445748Smckusick 
47545748Smckusick 	new_entry->is_a_map = FALSE;
47645748Smckusick 	new_entry->is_sub_map = FALSE;
47745748Smckusick 	new_entry->object.vm_object = object;
47845748Smckusick 	new_entry->offset = offset;
47945748Smckusick 
48045748Smckusick 	new_entry->copy_on_write = FALSE;
48145748Smckusick 	new_entry->needs_copy = FALSE;
48245748Smckusick 
48345748Smckusick 	if (map->is_main_map) {
48445748Smckusick 		new_entry->inheritance = VM_INHERIT_DEFAULT;
48545748Smckusick 		new_entry->protection = VM_PROT_DEFAULT;
48645748Smckusick 		new_entry->max_protection = VM_PROT_DEFAULT;
48745748Smckusick 		new_entry->wired_count = 0;
48845748Smckusick 	}
48945748Smckusick 
49045748Smckusick 	/*
49145748Smckusick 	 *	Insert the new entry into the list
49245748Smckusick 	 */
49345748Smckusick 
49445748Smckusick 	vm_map_entry_link(map, prev_entry, new_entry);
49545748Smckusick 	map->size += new_entry->end - new_entry->start;
49645748Smckusick 
49745748Smckusick 	/*
49845748Smckusick 	 *	Update the free space hint
49945748Smckusick 	 */
50045748Smckusick 
50145748Smckusick 	if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
50245748Smckusick 		map->first_free = new_entry;
50345748Smckusick 
50445748Smckusick 	return(KERN_SUCCESS);
50545748Smckusick }
50645748Smckusick 
50745748Smckusick /*
50845748Smckusick  *	SAVE_HINT:
50945748Smckusick  *
51045748Smckusick  *	Saves the specified entry as the hint for
51145748Smckusick  *	future lookups.  Performs necessary interlocks.
51245748Smckusick  */
51345748Smckusick #define	SAVE_HINT(map,value) \
51445748Smckusick 		simple_lock(&(map)->hint_lock); \
51545748Smckusick 		(map)->hint = (value); \
51645748Smckusick 		simple_unlock(&(map)->hint_lock);
51745748Smckusick 
51845748Smckusick /*
51945748Smckusick  *	vm_map_lookup_entry:	[ internal use only ]
52045748Smckusick  *
52145748Smckusick  *	Finds the map entry containing (or
52245748Smckusick  *	immediately preceding) the specified address
52345748Smckusick  *	in the given map; the entry is returned
52445748Smckusick  *	in the "entry" parameter.  The boolean
52545748Smckusick  *	result indicates whether the address is
52645748Smckusick  *	actually contained in the map.
52745748Smckusick  */
52868162Scgd boolean_t
52968162Scgd vm_map_lookup_entry(map, address, entry)
53045748Smckusick 	register vm_map_t	map;
53145748Smckusick 	register vm_offset_t	address;
53245748Smckusick 	vm_map_entry_t		*entry;		/* OUT */
53345748Smckusick {
53445748Smckusick 	register vm_map_entry_t		cur;
53545748Smckusick 	register vm_map_entry_t		last;
53645748Smckusick 
53745748Smckusick 	/*
53845748Smckusick 	 *	Start looking either from the head of the
53945748Smckusick 	 *	list, or from the hint.
54045748Smckusick 	 */
54145748Smckusick 
54245748Smckusick 	simple_lock(&map->hint_lock);
54345748Smckusick 	cur = map->hint;
54445748Smckusick 	simple_unlock(&map->hint_lock);
54545748Smckusick 
54645748Smckusick 	if (cur == &map->header)
54745748Smckusick 		cur = cur->next;
54845748Smckusick 
54945748Smckusick 	if (address >= cur->start) {
55045748Smckusick 	    	/*
55145748Smckusick 		 *	Go from hint to end of list.
55245748Smckusick 		 *
55345748Smckusick 		 *	But first, make a quick check to see if
55445748Smckusick 		 *	we are already looking at the entry we
55545748Smckusick 		 *	want (which is usually the case).
55645748Smckusick 		 *	Note also that we don't need to save the hint
55745748Smckusick 		 *	here... it is the same hint (unless we are
55845748Smckusick 		 *	at the header, in which case the hint didn't
55945748Smckusick 		 *	buy us anything anyway).
56045748Smckusick 		 */
56145748Smckusick 		last = &map->header;
56245748Smckusick 		if ((cur != last) && (cur->end > address)) {
56345748Smckusick 			*entry = cur;
56445748Smckusick 			return(TRUE);
56545748Smckusick 		}
56645748Smckusick 	}
56745748Smckusick 	else {
56845748Smckusick 	    	/*
56945748Smckusick 		 *	Go from start to hint, *inclusively*
57045748Smckusick 		 */
57145748Smckusick 		last = cur->next;
57245748Smckusick 		cur = map->header.next;
57345748Smckusick 	}
57445748Smckusick 
57545748Smckusick 	/*
57645748Smckusick 	 *	Search linearly
57745748Smckusick 	 */
57845748Smckusick 
57945748Smckusick 	while (cur != last) {
58045748Smckusick 		if (cur->end > address) {
58145748Smckusick 			if (address >= cur->start) {
58245748Smckusick 			    	/*
58345748Smckusick 				 *	Save this lookup for future
58445748Smckusick 				 *	hints, and return
58545748Smckusick 				 */
58645748Smckusick 
58745748Smckusick 				*entry = cur;
58845748Smckusick 				SAVE_HINT(map, cur);
58945748Smckusick 				return(TRUE);
59045748Smckusick 			}
59145748Smckusick 			break;
59245748Smckusick 		}
59345748Smckusick 		cur = cur->next;
59445748Smckusick 	}
59545748Smckusick 	*entry = cur->prev;
59645748Smckusick 	SAVE_HINT(map, *entry);
59745748Smckusick 	return(FALSE);
59845748Smckusick }
59945748Smckusick 
60045748Smckusick /*
60152610Storek  * Find sufficient space for `length' bytes in the given map, starting at
60252610Storek  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
60352610Storek  */
60452610Storek int
60552610Storek vm_map_findspace(map, start, length, addr)
60652610Storek 	register vm_map_t map;
60752610Storek 	register vm_offset_t start;
60852610Storek 	vm_size_t length;
60952610Storek 	vm_offset_t *addr;
61052610Storek {
61152610Storek 	register vm_map_entry_t entry, next;
61252610Storek 	register vm_offset_t end;
61352610Storek 
61452610Storek 	if (start < map->min_offset)
61552610Storek 		start = map->min_offset;
61652610Storek 	if (start > map->max_offset)
61752610Storek 		return (1);
61852610Storek 
61952610Storek 	/*
62052610Storek 	 * Look for the first possible address; if there's already
62152610Storek 	 * something at this address, we have to start after it.
62252610Storek 	 */
62352610Storek 	if (start == map->min_offset) {
62452610Storek 		if ((entry = map->first_free) != &map->header)
62552610Storek 			start = entry->end;
62652610Storek 	} else {
62752610Storek 		vm_map_entry_t tmp;
62852610Storek 		if (vm_map_lookup_entry(map, start, &tmp))
62952610Storek 			start = tmp->end;
63052610Storek 		entry = tmp;
63152610Storek 	}
63252610Storek 
63352610Storek 	/*
63452610Storek 	 * Look through the rest of the map, trying to fit a new region in
63552610Storek 	 * the gap between existing regions, or after the very last region.
63652610Storek 	 */
63752610Storek 	for (;; start = (entry = next)->end) {
63852610Storek 		/*
63952610Storek 		 * Find the end of the proposed new region.  Be sure we didn't
64052610Storek 		 * go beyond the end of the map, or wrap around the address;
64152610Storek 		 * if so, we lose.  Otherwise, if this is the last entry, or
64252610Storek 		 * if the proposed new region fits before the next entry, we
64352610Storek 		 * win.
64452610Storek 		 */
64552610Storek 		end = start + length;
64652610Storek 		if (end > map->max_offset || end < start)
64752610Storek 			return (1);
64852610Storek 		next = entry->next;
64952610Storek 		if (next == &map->header || next->start >= end)
65052610Storek 			break;
65152610Storek 	}
65252610Storek 	SAVE_HINT(map, entry);
65352610Storek 	*addr = start;
65452610Storek 	return (0);
65552610Storek }
65652610Storek 
65752610Storek /*
65845748Smckusick  *	vm_map_find finds an unallocated region in the target address
65945748Smckusick  *	map with the given length.  The search is defined to be
66045748Smckusick  *	first-fit from the specified address; the region found is
66145748Smckusick  *	returned in the same parameter.
66245748Smckusick  *
66345748Smckusick  */
66453357Sbostic int
66545748Smckusick vm_map_find(map, object, offset, addr, length, find_space)
66645748Smckusick 	vm_map_t	map;
66745748Smckusick 	vm_object_t	object;
66845748Smckusick 	vm_offset_t	offset;
66945748Smckusick 	vm_offset_t	*addr;		/* IN/OUT */
67045748Smckusick 	vm_size_t	length;
67145748Smckusick 	boolean_t	find_space;
67245748Smckusick {
67345748Smckusick 	register vm_offset_t	start;
67445748Smckusick 	int			result;
67545748Smckusick 
67645748Smckusick 	start = *addr;
67745748Smckusick 	vm_map_lock(map);
67845748Smckusick 	if (find_space) {
67952610Storek 		if (vm_map_findspace(map, start, length, addr)) {
68045748Smckusick 			vm_map_unlock(map);
68145748Smckusick 			return (KERN_NO_SPACE);
68245748Smckusick 		}
68352610Storek 		start = *addr;
68445748Smckusick 	}
68545748Smckusick 	result = vm_map_insert(map, object, offset, start, start + length);
68645748Smckusick 	vm_map_unlock(map);
68752610Storek 	return (result);
68845748Smckusick }
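
/*
 * Illustrative use (hypothetical, not part of the original source): a
 * kmem_alloc_pageable()-style caller asks for `size' bytes of space
 * anywhere in kernel_map, backed by no object (anonymous, zero fill):
 *
 *	vm_offset_t	addr;
 *
 *	addr = vm_map_min(kernel_map);
 *	if (vm_map_find(kernel_map, NULL, (vm_offset_t)0,
 *	    &addr, size, TRUE) != KERN_SUCCESS)
 *		return (0);
 */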
68945748Smckusick 
69045748Smckusick /*
69145748Smckusick  *	vm_map_simplify_entry:	[ internal use only ]
69245748Smckusick  *
69345748Smckusick  *	Simplify the given map entry by:
69445748Smckusick  *		removing extra sharing maps
69545748Smckusick  *		[XXX maybe later] merging with a neighbor
69645748Smckusick  */
69768162Scgd void
69868162Scgd vm_map_simplify_entry(map, entry)
69945748Smckusick 	vm_map_t	map;
70045748Smckusick 	vm_map_entry_t	entry;
70145748Smckusick {
70245748Smckusick #ifdef	lint
70345748Smckusick 	map++;
70460345Storek #endif
70545748Smckusick 
70645748Smckusick 	/*
70745748Smckusick 	 *	If this entry corresponds to a sharing map, then
70845748Smckusick 	 *	see if we can remove the level of indirection.
70945748Smckusick 	 *	If it's not a sharing map, then it points to
71045748Smckusick 	 *	a VM object, so see if we can merge with either
71145748Smckusick 	 *	of our neighbors.
71245748Smckusick 	 */
71345748Smckusick 
71445748Smckusick 	if (entry->is_sub_map)
71545748Smckusick 		return;
71645748Smckusick 	if (entry->is_a_map) {
71745748Smckusick #if	0
71845748Smckusick 		vm_map_t	my_share_map;
71945748Smckusick 		int		count;
72045748Smckusick 
72145748Smckusick 		my_share_map = entry->object.share_map;
72245748Smckusick 		simple_lock(&my_share_map->ref_lock);
72345748Smckusick 		count = my_share_map->ref_count;
72445748Smckusick 		simple_unlock(&my_share_map->ref_lock);
72545748Smckusick 
72645748Smckusick 		if (count == 1) {
72745748Smckusick 			/* Can move the region from
72845748Smckusick 			 * entry->start to entry->end (+ entry->offset)
72945748Smckusick 			 * in my_share_map into place of entry.
73045748Smckusick 			 * Later.
73145748Smckusick 			 */
73245748Smckusick 		}
73360345Storek #endif
73445748Smckusick 	}
73545748Smckusick 	else {
73645748Smckusick 		/*
73745748Smckusick 		 *	Try to merge with our neighbors.
73845748Smckusick 		 *
73945748Smckusick 		 *	Conditions for merge are:
74045748Smckusick 		 *
74145748Smckusick 		 *	1.  entries are adjacent.
74245748Smckusick 		 *	2.  both entries point to objects
74345748Smckusick 		 *	    with null pagers.
74445748Smckusick 		 *
74545748Smckusick 		 * 	If a merge is possible, we replace the two
74645748Smckusick 		 *	entries with a single entry, then merge
74745748Smckusick 		 *	the two objects into a single object.
74845748Smckusick 		 *
74945748Smckusick 		 *	Now, all that is left to do is write the
75045748Smckusick 		 *	code!
75145748Smckusick 		 */
75245748Smckusick 	}
75345748Smckusick }
75445748Smckusick 
75545748Smckusick /*
75645748Smckusick  *	vm_map_clip_start:	[ internal use only ]
75745748Smckusick  *
75845748Smckusick  *	Asserts that the given entry begins at or after
75945748Smckusick  *	the specified address; if necessary,
76045748Smckusick  *	it splits the entry into two.
76145748Smckusick  */
76245748Smckusick #define vm_map_clip_start(map, entry, startaddr) \
76345748Smckusick { \
76445748Smckusick 	if (startaddr > entry->start) \
76545748Smckusick 		_vm_map_clip_start(map, entry, startaddr); \
76645748Smckusick }
76745748Smckusick 
76845748Smckusick /*
76945748Smckusick  *	This routine is called only when it is known that
77045748Smckusick  *	the entry must be split.
77145748Smckusick  */
77268162Scgd static void
77368162Scgd _vm_map_clip_start(map, entry, start)
77445748Smckusick 	register vm_map_t	map;
77545748Smckusick 	register vm_map_entry_t	entry;
77645748Smckusick 	register vm_offset_t	start;
77745748Smckusick {
77845748Smckusick 	register vm_map_entry_t	new_entry;
77945748Smckusick 
78045748Smckusick 	/*
78145748Smckusick 	 *	See if we can simplify this entry first
78245748Smckusick 	 */
78345748Smckusick 
78445748Smckusick 	vm_map_simplify_entry(map, entry);
78545748Smckusick 
78645748Smckusick 	/*
78745748Smckusick 	 *	Split off the front portion --
78845748Smckusick 	 *	note that we must insert the new
78945748Smckusick 	 *	entry BEFORE this one, so that
79045748Smckusick 	 *	this entry has the specified starting
79145748Smckusick 	 *	address.
79245748Smckusick 	 */
79345748Smckusick 
79445748Smckusick 	new_entry = vm_map_entry_create(map);
79545748Smckusick 	*new_entry = *entry;
79645748Smckusick 
79745748Smckusick 	new_entry->end = start;
79845748Smckusick 	entry->offset += (start - entry->start);
79945748Smckusick 	entry->start = start;
80045748Smckusick 
80145748Smckusick 	vm_map_entry_link(map, entry->prev, new_entry);
80245748Smckusick 
80345748Smckusick 	if (entry->is_a_map || entry->is_sub_map)
80445748Smckusick 	 	vm_map_reference(new_entry->object.share_map);
80545748Smckusick 	else
80645748Smckusick 		vm_object_reference(new_entry->object.vm_object);
80745748Smckusick }
80845748Smckusick 
80945748Smckusick /*
81045748Smckusick  *	vm_map_clip_end:	[ internal use only ]
81145748Smckusick  *
81245748Smckusick  *	Asserts that the given entry ends at or before
81345748Smckusick  *	the specified address; if necessary,
81445748Smckusick  *	it splits the entry into two.
81545748Smckusick  */
81645748Smckusick 
81745748Smckusick #define vm_map_clip_end(map, entry, endaddr) \
81845748Smckusick { \
81945748Smckusick 	if (endaddr < entry->end) \
82045748Smckusick 		_vm_map_clip_end(map, entry, endaddr); \
82145748Smckusick }
82245748Smckusick 
82345748Smckusick /*
82445748Smckusick  *	This routine is called only when it is known that
82545748Smckusick  *	the entry must be split.
82645748Smckusick  */
82768162Scgd static void
82868162Scgd _vm_map_clip_end(map, entry, end)
82945748Smckusick 	register vm_map_t	map;
83045748Smckusick 	register vm_map_entry_t	entry;
83145748Smckusick 	register vm_offset_t	end;
83245748Smckusick {
83345748Smckusick 	register vm_map_entry_t	new_entry;
83445748Smckusick 
83545748Smckusick 	/*
83645748Smckusick 	 *	Create a new entry and insert it
83745748Smckusick 	 *	AFTER the specified entry
83845748Smckusick 	 */
83945748Smckusick 
84045748Smckusick 	new_entry = vm_map_entry_create(map);
84145748Smckusick 	*new_entry = *entry;
84245748Smckusick 
84345748Smckusick 	new_entry->start = entry->end = end;
84445748Smckusick 	new_entry->offset += (end - entry->start);
84545748Smckusick 
84645748Smckusick 	vm_map_entry_link(map, entry, new_entry);
84745748Smckusick 
84845748Smckusick 	if (entry->is_a_map || entry->is_sub_map)
84945748Smckusick 	 	vm_map_reference(new_entry->object.share_map);
85045748Smckusick 	else
85145748Smckusick 		vm_object_reference(new_entry->object.vm_object);
85245748Smckusick }
85345748Smckusick 
85445748Smckusick /*
85545748Smckusick  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
85645748Smckusick  *
85745748Smckusick  *	Asserts that the starting and ending region
85845748Smckusick  *	addresses fall within the valid range of the map.
85945748Smckusick  */
86045748Smckusick #define	VM_MAP_RANGE_CHECK(map, start, end)		\
86145748Smckusick 		{					\
86245748Smckusick 		if (start < vm_map_min(map))		\
86345748Smckusick 			start = vm_map_min(map);	\
86445748Smckusick 		if (end > vm_map_max(map))		\
86545748Smckusick 			end = vm_map_max(map);		\
86645748Smckusick 		if (start > end)			\
86745748Smckusick 			start = end;			\
86845748Smckusick 		}
86945748Smckusick 
87045748Smckusick /*
87145748Smckusick  *	vm_map_submap:		[ kernel use only ]
87245748Smckusick  *
87345748Smckusick  *	Mark the given range as handled by a subordinate map.
87445748Smckusick  *
87545748Smckusick  *	This range must have been created with vm_map_find,
87645748Smckusick  *	and no other operations may have been performed on this
87745748Smckusick  *	range prior to calling vm_map_submap.
87845748Smckusick  *
87945748Smckusick  *	Only a limited number of operations can be performed
88045748Smckusick  *	within this range after calling vm_map_submap:
88145748Smckusick  *		vm_fault
88245748Smckusick  *	[Don't try vm_map_copy!]
88345748Smckusick  *
88445748Smckusick  *	To remove a submapping, one must first remove the
88545748Smckusick  *	range from the superior map, and then destroy the
88645748Smckusick  *	submap (if desired).  [Better yet, don't try it.]
88745748Smckusick  */
88853357Sbostic int
88945748Smckusick vm_map_submap(map, start, end, submap)
89045748Smckusick 	register vm_map_t	map;
89145748Smckusick 	register vm_offset_t	start;
89245748Smckusick 	register vm_offset_t	end;
89345748Smckusick 	vm_map_t		submap;
89445748Smckusick {
89545748Smckusick 	vm_map_entry_t		entry;
89645748Smckusick 	register int		result = KERN_INVALID_ARGUMENT;
89745748Smckusick 
89845748Smckusick 	vm_map_lock(map);
89945748Smckusick 
90045748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
90145748Smckusick 
90245748Smckusick 	if (vm_map_lookup_entry(map, start, &entry)) {
90345748Smckusick 		vm_map_clip_start(map, entry, start);
90445748Smckusick 	}
90545748Smckusick 	 else
90645748Smckusick 		entry = entry->next;
90745748Smckusick 
90845748Smckusick 	vm_map_clip_end(map, entry, end);
90945748Smckusick 
91045748Smckusick 	if ((entry->start == start) && (entry->end == end) &&
91145748Smckusick 	    (!entry->is_a_map) &&
91248383Skarels 	    (entry->object.vm_object == NULL) &&
91345748Smckusick 	    (!entry->copy_on_write)) {
91445748Smckusick 		entry->is_a_map = FALSE;
91545748Smckusick 		entry->is_sub_map = TRUE;
91645748Smckusick 		vm_map_reference(entry->object.sub_map = submap);
91745748Smckusick 		result = KERN_SUCCESS;
91845748Smckusick 	}
91945748Smckusick 	vm_map_unlock(map);
92045748Smckusick 
92145748Smckusick 	return(result);
92245748Smckusick }
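
/*
 * Illustrative sequence (hypothetical, not part of the original source):
 * reserve a range in the parent map with vm_map_find(), then install a
 * previously created map as a submap over exactly that range:
 *
 *	if (vm_map_find(parent, NULL, (vm_offset_t)0,
 *	    &addr, size, TRUE) != KERN_SUCCESS)
 *		panic("no space in parent map");
 *	if (vm_map_submap(parent, addr, addr + size, submap) != KERN_SUCCESS)
 *		panic("cannot install submap");
 */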
92345748Smckusick 
92445748Smckusick /*
92545748Smckusick  *	vm_map_protect:
92645748Smckusick  *
92745748Smckusick  *	Sets the protection of the specified address
92845748Smckusick  *	region in the target map.  If "set_max" is
92945748Smckusick  *	specified, the maximum protection is to be set;
93045748Smckusick  *	otherwise, only the current protection is affected.
93145748Smckusick  */
93253357Sbostic int
93345748Smckusick vm_map_protect(map, start, end, new_prot, set_max)
93445748Smckusick 	register vm_map_t	map;
93545748Smckusick 	register vm_offset_t	start;
93645748Smckusick 	register vm_offset_t	end;
93745748Smckusick 	register vm_prot_t	new_prot;
93845748Smckusick 	register boolean_t	set_max;
93945748Smckusick {
94045748Smckusick 	register vm_map_entry_t		current;
94145748Smckusick 	vm_map_entry_t			entry;
94245748Smckusick 
94345748Smckusick 	vm_map_lock(map);
94445748Smckusick 
94545748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
94645748Smckusick 
94745748Smckusick 	if (vm_map_lookup_entry(map, start, &entry)) {
94845748Smckusick 		vm_map_clip_start(map, entry, start);
94945748Smckusick 	}
95045748Smckusick 	 else
95145748Smckusick 		entry = entry->next;
95245748Smckusick 
95345748Smckusick 	/*
95445748Smckusick 	 *	Make a first pass to check for protection
95545748Smckusick 	 *	violations.
95645748Smckusick 	 */
95745748Smckusick 
95845748Smckusick 	current = entry;
95945748Smckusick 	while ((current != &map->header) && (current->start < end)) {
96045748Smckusick 		if (current->is_sub_map)
96145748Smckusick 			return(KERN_INVALID_ARGUMENT);
96245748Smckusick 		if ((new_prot & current->max_protection) != new_prot) {
96345748Smckusick 			vm_map_unlock(map);
96445748Smckusick 			return(KERN_PROTECTION_FAILURE);
96545748Smckusick 		}
96645748Smckusick 
96745748Smckusick 		current = current->next;
96845748Smckusick 	}
96945748Smckusick 
97045748Smckusick 	/*
97145748Smckusick 	 *	Go back and fix up protections.
97245748Smckusick 	 *	[Note that clipping is not necessary the second time.]
97345748Smckusick 	 */
97445748Smckusick 
97545748Smckusick 	current = entry;
97645748Smckusick 
97745748Smckusick 	while ((current != &map->header) && (current->start < end)) {
97845748Smckusick 		vm_prot_t	old_prot;
97945748Smckusick 
98045748Smckusick 		vm_map_clip_end(map, current, end);
98145748Smckusick 
98245748Smckusick 		old_prot = current->protection;
98345748Smckusick 		if (set_max)
98445748Smckusick 			current->protection =
98545748Smckusick 				(current->max_protection = new_prot) &
98645748Smckusick 					old_prot;
98745748Smckusick 		else
98845748Smckusick 			current->protection = new_prot;
98945748Smckusick 
99045748Smckusick 		/*
99145748Smckusick 		 *	Update physical map if necessary.
99245748Smckusick 		 *	Worry about copy-on-write here -- CHECK THIS XXX
99345748Smckusick 		 */
99445748Smckusick 
99545748Smckusick 		if (current->protection != old_prot) {
99645748Smckusick 
99745748Smckusick #define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
99845748Smckusick 							VM_PROT_ALL)
99945748Smckusick #define	max(a,b)	((a) > (b) ? (a) : (b))
100045748Smckusick 
100145748Smckusick 			if (current->is_a_map) {
100245748Smckusick 				vm_map_entry_t	share_entry;
100345748Smckusick 				vm_offset_t	share_end;
100445748Smckusick 
100545748Smckusick 				vm_map_lock(current->object.share_map);
100645748Smckusick 				(void) vm_map_lookup_entry(
100745748Smckusick 						current->object.share_map,
100845748Smckusick 						current->offset,
100945748Smckusick 						&share_entry);
101045748Smckusick 				share_end = current->offset +
101145748Smckusick 					(current->end - current->start);
101245748Smckusick 				while ((share_entry !=
101345748Smckusick 					&current->object.share_map->header) &&
101445748Smckusick 					(share_entry->start < share_end)) {
101545748Smckusick 
101645748Smckusick 					pmap_protect(map->pmap,
101745748Smckusick 						(max(share_entry->start,
101845748Smckusick 							current->offset) -
101945748Smckusick 							current->offset +
102045748Smckusick 							current->start),
102145748Smckusick 						min(share_entry->end,
102245748Smckusick 							share_end) -
102345748Smckusick 						current->offset +
102445748Smckusick 						current->start,
102545748Smckusick 						current->protection &
102645748Smckusick 							MASK(share_entry));
102745748Smckusick 
102845748Smckusick 					share_entry = share_entry->next;
102945748Smckusick 				}
103045748Smckusick 				vm_map_unlock(current->object.share_map);
103145748Smckusick 			}
103245748Smckusick 			else
103345748Smckusick 			 	pmap_protect(map->pmap, current->start,
103445748Smckusick 					current->end,
103545748Smckusick 					current->protection & MASK(entry));
103645748Smckusick #undef	max
103745748Smckusick #undef	MASK
103845748Smckusick 		}
103945748Smckusick 		current = current->next;
104045748Smckusick 	}
104145748Smckusick 
104245748Smckusick 	vm_map_unlock(map);
104345748Smckusick 	return(KERN_SUCCESS);
104445748Smckusick }
104545748Smckusick 
104645748Smckusick /*
104745748Smckusick  *	vm_map_inherit:
104845748Smckusick  *
104945748Smckusick  *	Sets the inheritance of the specified address
105045748Smckusick  *	range in the target map.  Inheritance
105145748Smckusick  *	affects how the map will be shared with
105245748Smckusick  *	child maps at the time of vm_map_fork.
105345748Smckusick  */
105453357Sbostic int
105545748Smckusick vm_map_inherit(map, start, end, new_inheritance)
105645748Smckusick 	register vm_map_t	map;
105745748Smckusick 	register vm_offset_t	start;
105845748Smckusick 	register vm_offset_t	end;
105945748Smckusick 	register vm_inherit_t	new_inheritance;
106045748Smckusick {
106145748Smckusick 	register vm_map_entry_t	entry;
106245748Smckusick 	vm_map_entry_t	temp_entry;
106345748Smckusick 
106445748Smckusick 	switch (new_inheritance) {
106545748Smckusick 	case VM_INHERIT_NONE:
106645748Smckusick 	case VM_INHERIT_COPY:
106745748Smckusick 	case VM_INHERIT_SHARE:
106845748Smckusick 		break;
106945748Smckusick 	default:
107045748Smckusick 		return(KERN_INVALID_ARGUMENT);
107145748Smckusick 	}
107245748Smckusick 
107345748Smckusick 	vm_map_lock(map);
107445748Smckusick 
107545748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
107645748Smckusick 
107745748Smckusick 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
107845748Smckusick 		entry = temp_entry;
107945748Smckusick 		vm_map_clip_start(map, entry, start);
108045748Smckusick 	}
108145748Smckusick 	else
108245748Smckusick 		entry = temp_entry->next;
108345748Smckusick 
108445748Smckusick 	while ((entry != &map->header) && (entry->start < end)) {
108545748Smckusick 		vm_map_clip_end(map, entry, end);
108645748Smckusick 
108745748Smckusick 		entry->inheritance = new_inheritance;
108845748Smckusick 
108945748Smckusick 		entry = entry->next;
109045748Smckusick 	}
109145748Smckusick 
109245748Smckusick 	vm_map_unlock(map);
109345748Smckusick 	return(KERN_SUCCESS);
109445748Smckusick }
109545748Smckusick 
109645748Smckusick /*
109745748Smckusick  *	vm_map_pageable:
109845748Smckusick  *
109945748Smckusick  *	Sets the pageability of the specified address
110045748Smckusick  *	range in the target map.  Regions specified
110145748Smckusick  *	as not pageable require locked-down physical
110245748Smckusick  *	memory and physical page maps.
110345748Smckusick  *
110445748Smckusick  *	The map must not be locked, but a reference
110545748Smckusick  *	must remain to the map throughout the call.
110645748Smckusick  */
110753357Sbostic int
110845748Smckusick vm_map_pageable(map, start, end, new_pageable)
110945748Smckusick 	register vm_map_t	map;
111045748Smckusick 	register vm_offset_t	start;
111145748Smckusick 	register vm_offset_t	end;
111245748Smckusick 	register boolean_t	new_pageable;
111345748Smckusick {
111445748Smckusick 	register vm_map_entry_t	entry;
111561005Shibler 	vm_map_entry_t		start_entry;
111658596Shibler 	register vm_offset_t	failed;
111758596Shibler 	int			rv;
111845748Smckusick 
111945748Smckusick 	vm_map_lock(map);
112045748Smckusick 
112145748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
112245748Smckusick 
112345748Smckusick 	/*
112445748Smckusick 	 *	Only one pageability change may take place at one
112545748Smckusick 	 *	time, since vm_fault assumes it will be called
112645748Smckusick 	 *	only once for each wiring/unwiring.  Therefore, we
112745748Smckusick 	 *	have to make sure we're actually changing the pageability
112845748Smckusick 	 *	for the entire region.  We do so before making any changes.
112945748Smckusick 	 */
113045748Smckusick 
113161005Shibler 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
113261005Shibler 		vm_map_unlock(map);
113361005Shibler 		return(KERN_INVALID_ADDRESS);
113445748Smckusick 	}
113561005Shibler 	entry = start_entry;
113645748Smckusick 
113745748Smckusick 	/*
113845748Smckusick 	 *	Actions are rather different for wiring and unwiring,
113945748Smckusick 	 *	so we have two separate cases.
114045748Smckusick 	 */
114145748Smckusick 
114245748Smckusick 	if (new_pageable) {
114345748Smckusick 
114461005Shibler 		vm_map_clip_start(map, entry, start);
114561005Shibler 
114645748Smckusick 		/*
114745748Smckusick 		 *	Unwiring.  First ensure that the range to be
114861005Shibler 		 *	unwired is really wired down and that there
114961005Shibler 		 *	are no holes.
115045748Smckusick 		 */
115145748Smckusick 		while ((entry != &map->header) && (entry->start < end)) {
115245748Smckusick 
115361005Shibler 		    if (entry->wired_count == 0 ||
115461005Shibler 			(entry->end < end &&
115561005Shibler 			 (entry->next == &map->header ||
115661005Shibler 			  entry->next->start > entry->end))) {
115745748Smckusick 			vm_map_unlock(map);
115845748Smckusick 			return(KERN_INVALID_ARGUMENT);
115945748Smckusick 		    }
116045748Smckusick 		    entry = entry->next;
116145748Smckusick 		}
116245748Smckusick 
116345748Smckusick 		/*
116445748Smckusick 		 *	Now decrement the wiring count for each region.
116545748Smckusick 		 *	If a region becomes completely unwired,
116645748Smckusick 		 *	unwire its physical pages and mappings.
116745748Smckusick 		 */
116868795Smckusick 		vm_map_set_recursive(&map->lock);
116945748Smckusick 
117061005Shibler 		entry = start_entry;
117145748Smckusick 		while ((entry != &map->header) && (entry->start < end)) {
117245748Smckusick 		    vm_map_clip_end(map, entry, end);
117345748Smckusick 
117445748Smckusick 		    entry->wired_count--;
117545748Smckusick 		    if (entry->wired_count == 0)
117645748Smckusick 			vm_fault_unwire(map, entry->start, entry->end);
117745748Smckusick 
117845748Smckusick 		    entry = entry->next;
117945748Smckusick 		}
118068795Smckusick 		vm_map_clear_recursive(&map->lock);
118145748Smckusick 	}
118245748Smckusick 
118345748Smckusick 	else {
118445748Smckusick 		/*
118545748Smckusick 		 *	Wiring.  We must do this in two passes:
118645748Smckusick 		 *
118761005Shibler 		 *	1.  Holding the write lock, we create any shadow
118861005Shibler 		 *	    or zero-fill objects that need to be created.
118961005Shibler 		 *	    Then we clip each map entry to the region to be
119061005Shibler 		 *	    wired and increment its wiring count.  We
119161005Shibler 		 *	    create objects before clipping the map entries
119261005Shibler 		 *	    to avoid object proliferation.
119345748Smckusick 		 *
119445748Smckusick 		 *	2.  We downgrade to a read lock, and call
119545748Smckusick 		 *	    vm_fault_wire to fault in the pages for any
119645748Smckusick 		 *	    newly wired area (wired_count is 1).
119745748Smckusick 		 *
119845748Smckusick 		 *	Downgrading to a read lock for vm_fault_wire avoids
119945748Smckusick 		 *	a possible deadlock with another thread that may have
120045748Smckusick 		 *	faulted on one of the pages to be wired (it would mark
120145748Smckusick 		 *	the page busy, blocking us, then in turn block on the
120245748Smckusick 		 *	map lock that we hold).  Because of problems in the
120345748Smckusick 		 *	recursive lock package, we cannot upgrade to a write
120445748Smckusick 		 *	lock in vm_map_lookup.  Thus, any actions that require
120545748Smckusick 		 *	the write lock must be done beforehand.  Because we
120645748Smckusick 		 *	keep the read lock on the map, the copy-on-write status
120745748Smckusick 		 *	of the entries we modify here cannot change.
120845748Smckusick 		 */
120945748Smckusick 
121045748Smckusick 		/*
121145748Smckusick 		 *	Pass 1.
121245748Smckusick 		 */
121345748Smckusick 		while ((entry != &map->header) && (entry->start < end)) {
121461005Shibler 		    if (entry->wired_count == 0) {
121545748Smckusick 
121645748Smckusick 			/*
121745748Smckusick 			 *	Perform actions of vm_map_lookup that need
121845748Smckusick 			 *	the write lock on the map: create a shadow
121945748Smckusick 			 *	object for a copy-on-write region, or an
122045748Smckusick 			 *	object for a zero-fill region.
122145748Smckusick 			 *
122245748Smckusick 			 *	We don't have to do this for entries that
122345748Smckusick 			 *	point to sharing maps, because we won't hold
122445748Smckusick 			 *	the lock on the sharing map.
122545748Smckusick 			 */
122645748Smckusick 			if (!entry->is_a_map) {
122745748Smckusick 			    if (entry->needs_copy &&
122845748Smckusick 				((entry->protection & VM_PROT_WRITE) != 0)) {
122945748Smckusick 
123045748Smckusick 				vm_object_shadow(&entry->object.vm_object,
123145748Smckusick 						&entry->offset,
123245748Smckusick 						(vm_size_t)(entry->end
123345748Smckusick 							- entry->start));
123445748Smckusick 				entry->needs_copy = FALSE;
123545748Smckusick 			    }
123648383Skarels 			    else if (entry->object.vm_object == NULL) {
123745748Smckusick 				entry->object.vm_object =
123845748Smckusick 				    vm_object_allocate((vm_size_t)(entry->end
123945748Smckusick 				    			- entry->start));
124045748Smckusick 				entry->offset = (vm_offset_t)0;
124145748Smckusick 			    }
124245748Smckusick 			}
124345748Smckusick 		    }
124461005Shibler 		    vm_map_clip_start(map, entry, start);
124561005Shibler 		    vm_map_clip_end(map, entry, end);
124661005Shibler 		    entry->wired_count++;
124745748Smckusick 
124861005Shibler 		    /*
124961005Shibler 		     * Check for holes
125061005Shibler 		     */
125161005Shibler 		    if (entry->end < end &&
125261005Shibler 			(entry->next == &map->header ||
125361005Shibler 			 entry->next->start > entry->end)) {
125461005Shibler 			/*
125561005Shibler 			 *	Found one.  Object creation actions
125661005Shibler 			 *	do not need to be undone, but the
125761005Shibler 			 *	wired counts need to be restored.
125861005Shibler 			 */
125961005Shibler 			while (entry != &map->header && entry->end > start) {
126061005Shibler 			    entry->wired_count--;
126161005Shibler 			    entry = entry->prev;
126261005Shibler 			}
126361005Shibler 			vm_map_unlock(map);
126461005Shibler 			return(KERN_INVALID_ARGUMENT);
126561005Shibler 		    }
126645748Smckusick 		    entry = entry->next;
126745748Smckusick 		}
126845748Smckusick 
126945748Smckusick 		/*
127045748Smckusick 		 *	Pass 2.
127145748Smckusick 		 */
127245748Smckusick 
127345748Smckusick 		/*
127445748Smckusick 		 * HACK HACK HACK HACK
127545748Smckusick 		 *
127645748Smckusick 		 * If we are wiring in the kernel map or a submap of it,
127745748Smckusick 		 * unlock the map to avoid deadlocks.  We trust that the
127845748Smckusick 		 * kernel threads are well-behaved, and therefore will
127945748Smckusick 		 * not do anything destructive to this region of the map
128045748Smckusick 		 * while we have it unlocked.  We cannot trust user threads
128145748Smckusick 		 * to do the same.
128245748Smckusick 		 *
128345748Smckusick 		 * HACK HACK HACK HACK
128445748Smckusick 		 */
128545748Smckusick 		if (vm_map_pmap(map) == kernel_pmap) {
128645748Smckusick 		    vm_map_unlock(map);		/* trust me ... */
128745748Smckusick 		}
128845748Smckusick 		else {
128968795Smckusick 		    vm_map_set_recursive(&map->lock);
1290*68938Smckusick 		    lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
129145748Smckusick 		}
129245748Smckusick 
129358596Shibler 		rv = 0;
129461005Shibler 		entry = start_entry;
129545748Smckusick 		while (entry != &map->header && entry->start < end) {
129658596Shibler 		    /*
129758596Shibler 		     * If vm_fault_wire fails for any page we need to
129858596Shibler 		     * undo what has been done.  We decrement the wiring
129958596Shibler 		     * count for those pages which have not yet been
130058596Shibler 		     * wired (now) and unwire those that have (later).
130158596Shibler 		     *
130258596Shibler 		     * XXX this violates the locking protocol on the map,
130358596Shibler 		     * needs to be fixed.
130458596Shibler 		     */
130558596Shibler 		    if (rv)
130658596Shibler 			entry->wired_count--;
130758596Shibler 		    else if (entry->wired_count == 1) {
130858596Shibler 			rv = vm_fault_wire(map, entry->start, entry->end);
130958596Shibler 			if (rv) {
131058596Shibler 			    failed = entry->start;
131158596Shibler 			    entry->wired_count--;
131258596Shibler 			}
131345748Smckusick 		    }
131445748Smckusick 		    entry = entry->next;
131545748Smckusick 		}
131645748Smckusick 
131745748Smckusick 		if (vm_map_pmap(map) == kernel_pmap) {
131845748Smckusick 		    vm_map_lock(map);
131945748Smckusick 		}
132045748Smckusick 		else {
132168795Smckusick 		    vm_map_clear_recursive(&map->lock);
132245748Smckusick 		}
132358596Shibler 		if (rv) {
132458596Shibler 		    vm_map_unlock(map);
132558596Shibler 		    (void) vm_map_pageable(map, start, failed, TRUE);
132658596Shibler 		    return(rv);
132758596Shibler 		}
132845748Smckusick 	}
132945748Smckusick 
133045748Smckusick 	vm_map_unlock(map);
133145748Smckusick 
133245748Smckusick 	return(KERN_SUCCESS);
133345748Smckusick }
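
/*
 * Illustrative use (hypothetical, not part of the original source): a
 * vslock()-style helper wires a user buffer for raw I/O by making the
 * page-rounded range non-pageable, and later unwires it by calling the
 * routine again with new_pageable == TRUE:
 *
 *	vm_map_pageable(&p->p_vmspace->vm_map, trunc_page(addr),
 *	    round_page(addr + len), FALSE);
 */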
133445748Smckusick 
133545748Smckusick /*
133665686Shibler  * vm_map_clean
133765686Shibler  *
133865686Shibler  * Push any dirty cached pages in the address range to their pager.
133965686Shibler  * If syncio is TRUE, dirty pages are written synchronously.
134065686Shibler  * If invalidate is TRUE, any cached pages are freed as well.
134165686Shibler  *
134265686Shibler  * Returns an error if any part of the specified range is not mapped.
134365686Shibler  */
134465686Shibler int
134565686Shibler vm_map_clean(map, start, end, syncio, invalidate)
134665686Shibler 	vm_map_t	map;
134765686Shibler 	vm_offset_t	start;
134865686Shibler 	vm_offset_t	end;
134965686Shibler 	boolean_t	syncio;
135065686Shibler 	boolean_t	invalidate;
135165686Shibler {
135265686Shibler 	register vm_map_entry_t current;
135365686Shibler 	vm_map_entry_t entry;
135465686Shibler 	vm_size_t size;
135565686Shibler 	vm_object_t object;
135665686Shibler 	vm_offset_t offset;
135765686Shibler 
135865686Shibler 	vm_map_lock_read(map);
135965686Shibler 	VM_MAP_RANGE_CHECK(map, start, end);
136065686Shibler 	if (!vm_map_lookup_entry(map, start, &entry)) {
136165686Shibler 		vm_map_unlock_read(map);
136265686Shibler 		return(KERN_INVALID_ADDRESS);
136365686Shibler 	}
136465686Shibler 
136565686Shibler 	/*
136665686Shibler 	 * Make a first pass to check for holes.
136765686Shibler 	 */
136865686Shibler 	for (current = entry; current->start < end; current = current->next) {
136965686Shibler 		if (current->is_sub_map) {
137065686Shibler 			vm_map_unlock_read(map);
137165686Shibler 			return(KERN_INVALID_ARGUMENT);
137265686Shibler 		}
137365686Shibler 		if (end > current->end &&
137465686Shibler 		    (current->next == &map->header ||
137565686Shibler 		     current->end != current->next->start)) {
137665686Shibler 			vm_map_unlock_read(map);
137765686Shibler 			return(KERN_INVALID_ADDRESS);
137865686Shibler 		}
137965686Shibler 	}
138065686Shibler 
138165686Shibler 	/*
138265686Shibler 	 * Make a second pass, cleaning/uncaching pages from the indicated
138365686Shibler 	 * objects as we go.
138465686Shibler 	 */
138565686Shibler 	for (current = entry; current->start < end; current = current->next) {
138665686Shibler 		offset = current->offset + (start - current->start);
138765686Shibler 		size = (end <= current->end ? end : current->end) - start;
138865686Shibler 		if (current->is_a_map) {
138965686Shibler 			register vm_map_t smap;
139065686Shibler 			vm_map_entry_t tentry;
139165686Shibler 			vm_size_t tsize;
139265686Shibler 
139365686Shibler 			smap = current->object.share_map;
139465686Shibler 			vm_map_lock_read(smap);
139565686Shibler 			(void) vm_map_lookup_entry(smap, offset, &tentry);
139665686Shibler 			tsize = tentry->end - offset;
139765686Shibler 			if (tsize < size)
139865686Shibler 				size = tsize;
139965686Shibler 			object = tentry->object.vm_object;
140065686Shibler 			offset = tentry->offset + (offset - tentry->start);
140165686Shibler 			vm_object_lock(object);
140265686Shibler 			vm_map_unlock_read(smap);
140365686Shibler 		} else {
140465686Shibler 			object = current->object.vm_object;
140565686Shibler 			vm_object_lock(object);
140665686Shibler 		}
140765686Shibler 		/*
140865686Shibler 		 * Flush pages if writing is allowed.
140965686Shibler 		 * XXX should we continue on an error?
141065686Shibler 		 */
141165686Shibler 		if ((current->protection & VM_PROT_WRITE) &&
141265686Shibler 		    !vm_object_page_clean(object, offset, offset+size,
141365686Shibler 					  syncio, FALSE)) {
141465686Shibler 			vm_object_unlock(object);
141565686Shibler 			vm_map_unlock_read(map);
141665686Shibler 			return(KERN_FAILURE);
141765686Shibler 		}
141865686Shibler 		if (invalidate)
141965686Shibler 			vm_object_page_remove(object, offset, offset+size);
142065686Shibler 		vm_object_unlock(object);
142165686Shibler 		start += size;
142265686Shibler 	}
142365686Shibler 
142465686Shibler 	vm_map_unlock_read(map);
142565686Shibler 	return(KERN_SUCCESS);
142665686Shibler }
142765686Shibler 
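/*
 * A minimal sketch of an msync(2)-style use of vm_map_clean(): write
 * the dirty pages in a range back to their pager and drop the cached
 * copies.  The helper name and the flag combination are assumed for
 * illustration only.
 */
#ifdef notdef
static int
example_sync_range(map, addr, size)
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
{
	/*
	 * syncio == TRUE: wait for the page-outs to complete.
	 * invalidate == TRUE: free the cached pages as well, so later
	 * faults go back to the pager for fresh copies.
	 */
	return (vm_map_clean(map, trunc_page(addr),
	    round_page(addr + size), TRUE, TRUE));
}
#endif /* notdef */
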
142865686Shibler /*
142945748Smckusick  *	vm_map_entry_unwire:	[ internal use only ]
143045748Smckusick  *
143145748Smckusick  *	Make the region specified by this entry pageable.
143245748Smckusick  *
143345748Smckusick  *	The map in question should be locked.
143445748Smckusick  *	[This is the reason for this routine's existence.]
143545748Smckusick  */
143668162Scgd void
143768162Scgd vm_map_entry_unwire(map, entry)
143845748Smckusick 	vm_map_t		map;
143945748Smckusick 	register vm_map_entry_t	entry;
144045748Smckusick {
144145748Smckusick 	vm_fault_unwire(map, entry->start, entry->end);
144245748Smckusick 	entry->wired_count = 0;
144345748Smckusick }
144445748Smckusick 
144545748Smckusick /*
144645748Smckusick  *	vm_map_entry_delete:	[ internal use only ]
144745748Smckusick  *
144845748Smckusick  *	Deallocate the given entry from the target map.
144945748Smckusick  */
145068162Scgd void
145168162Scgd vm_map_entry_delete(map, entry)
145245748Smckusick 	register vm_map_t	map;
145345748Smckusick 	register vm_map_entry_t	entry;
145445748Smckusick {
145545748Smckusick 	if (entry->wired_count != 0)
145645748Smckusick 		vm_map_entry_unwire(map, entry);
145745748Smckusick 
145845748Smckusick 	vm_map_entry_unlink(map, entry);
145945748Smckusick 	map->size -= entry->end - entry->start;
146045748Smckusick 
146145748Smckusick 	if (entry->is_a_map || entry->is_sub_map)
146245748Smckusick 		vm_map_deallocate(entry->object.share_map);
146345748Smckusick 	else
146445748Smckusick 	 	vm_object_deallocate(entry->object.vm_object);
146545748Smckusick 
146645748Smckusick 	vm_map_entry_dispose(map, entry);
146745748Smckusick }
146845748Smckusick 
146945748Smckusick /*
147045748Smckusick  *	vm_map_delete:	[ internal use only ]
147145748Smckusick  *
147245748Smckusick  *	Deallocates the given address range from the target
147345748Smckusick  *	map.
147445748Smckusick  *
147545748Smckusick  *	When called with a sharing map, removes pages from
147645748Smckusick  *	that region from all physical maps.
147745748Smckusick  */
147853357Sbostic int
147945748Smckusick vm_map_delete(map, start, end)
148045748Smckusick 	register vm_map_t	map;
148145748Smckusick 	vm_offset_t		start;
148245748Smckusick 	register vm_offset_t	end;
148345748Smckusick {
148445748Smckusick 	register vm_map_entry_t	entry;
148545748Smckusick 	vm_map_entry_t		first_entry;
148645748Smckusick 
148745748Smckusick 	/*
148845748Smckusick 	 *	Find the start of the region, and clip it
148945748Smckusick 	 */
149045748Smckusick 
149145748Smckusick 	if (!vm_map_lookup_entry(map, start, &first_entry))
149245748Smckusick 		entry = first_entry->next;
149345748Smckusick 	else {
149445748Smckusick 		entry = first_entry;
149545748Smckusick 		vm_map_clip_start(map, entry, start);
149645748Smckusick 
149745748Smckusick 		/*
149845748Smckusick 		 *	Fix the lookup hint now, rather than each
149945748Smckusick 		 *	time through the loop.
150045748Smckusick 		 */
150145748Smckusick 
150245748Smckusick 		SAVE_HINT(map, entry->prev);
150345748Smckusick 	}
150445748Smckusick 
150545748Smckusick 	/*
150645748Smckusick 	 *	Save the free space hint
150745748Smckusick 	 */
150845748Smckusick 
150945748Smckusick 	if (map->first_free->start >= start)
151045748Smckusick 		map->first_free = entry->prev;
151145748Smckusick 
151245748Smckusick 	/*
151345748Smckusick 	 *	Step through all entries in this region
151445748Smckusick 	 */
151545748Smckusick 
151645748Smckusick 	while ((entry != &map->header) && (entry->start < end)) {
151745748Smckusick 		vm_map_entry_t		next;
151845748Smckusick 		register vm_offset_t	s, e;
151945748Smckusick 		register vm_object_t	object;
152045748Smckusick 
152145748Smckusick 		vm_map_clip_end(map, entry, end);
152245748Smckusick 
152345748Smckusick 		next = entry->next;
152445748Smckusick 		s = entry->start;
152545748Smckusick 		e = entry->end;
152645748Smckusick 
152745748Smckusick 		/*
152845748Smckusick 		 *	Unwire before removing addresses from the pmap;
152945748Smckusick 		 *	otherwise, unwiring will put the entries back in
153045748Smckusick 		 *	the pmap.
153145748Smckusick 		 */
153245748Smckusick 
153345748Smckusick 		object = entry->object.vm_object;
153445748Smckusick 		if (entry->wired_count != 0)
153545748Smckusick 			vm_map_entry_unwire(map, entry);
153645748Smckusick 
153745748Smckusick 		/*
153845748Smckusick 		 *	If this is a sharing map, we must remove
153945748Smckusick 		 *	*all* references to this data, since we can't
154045748Smckusick 		 *	find all of the physical maps which are sharing
154145748Smckusick 		 *	it.
154245748Smckusick 		 */
154345748Smckusick 
154445748Smckusick 		if (object == kernel_object || object == kmem_object)
154545748Smckusick 			vm_object_page_remove(object, entry->offset,
154645748Smckusick 					entry->offset + (e - s));
154745748Smckusick 		else if (!map->is_main_map)
154845748Smckusick 			vm_object_pmap_remove(object,
154945748Smckusick 					 entry->offset,
155045748Smckusick 					 entry->offset + (e - s));
155145748Smckusick 		else
155245748Smckusick 			pmap_remove(map->pmap, s, e);
155345748Smckusick 
155445748Smckusick 		/*
155545748Smckusick 		 *	Delete the entry (which may delete the object)
155645748Smckusick 		 *	only after removing all pmap entries pointing
155745748Smckusick 		 *	to its pages.  (Otherwise, its page frames may
155845748Smckusick 		 *	be reallocated, and any modify bits will be
155945748Smckusick 		 *	set in the wrong object!)
156045748Smckusick 		 */
156145748Smckusick 
156245748Smckusick 		vm_map_entry_delete(map, entry);
156345748Smckusick 		entry = next;
156445748Smckusick 	}
156545748Smckusick 	return(KERN_SUCCESS);
156645748Smckusick }
156745748Smckusick 
156845748Smckusick /*
156945748Smckusick  *	vm_map_remove:
157045748Smckusick  *
157145748Smckusick  *	Remove the given address range from the target map.
157245748Smckusick  *	This is the exported form of vm_map_delete.
157345748Smckusick  */
157453357Sbostic int
157545748Smckusick vm_map_remove(map, start, end)
157645748Smckusick 	register vm_map_t	map;
157745748Smckusick 	register vm_offset_t	start;
157845748Smckusick 	register vm_offset_t	end;
157945748Smckusick {
158045748Smckusick 	register int		result;
158145748Smckusick 
158245748Smckusick 	vm_map_lock(map);
158345748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
158445748Smckusick 	result = vm_map_delete(map, start, end);
158545748Smckusick 	vm_map_unlock(map);
158645748Smckusick 
158745748Smckusick 	return(result);
158845748Smckusick }
158945748Smckusick 
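/*
 * A minimal sketch of releasing a previously allocated kernel-map range
 * with vm_map_remove().  It assumes kernel_map is visible at this
 * point; the helper and its arguments are illustrative only.
 */
#ifdef notdef
static void
example_release_kernel_range(addr, size)
	vm_offset_t	addr;
	vm_size_t	size;
{
	/* drop the entries and any pmap mappings backing this range */
	(void) vm_map_remove(kernel_map, trunc_page(addr),
	    round_page(addr + size));
}
#endif /* notdef */
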
159045748Smckusick /*
159145748Smckusick  *	vm_map_check_protection:
159245748Smckusick  *
159345748Smckusick  *	Assert that the target map allows the specified
159445748Smckusick  *	privilege on the entire address region given.
159545748Smckusick  *	The entire region must be allocated.
159645748Smckusick  */
159768162Scgd boolean_t
159868162Scgd vm_map_check_protection(map, start, end, protection)
159945748Smckusick 	register vm_map_t	map;
160045748Smckusick 	register vm_offset_t	start;
160145748Smckusick 	register vm_offset_t	end;
160245748Smckusick 	register vm_prot_t	protection;
160345748Smckusick {
160445748Smckusick 	register vm_map_entry_t	entry;
160545748Smckusick 	vm_map_entry_t		tmp_entry;
160645748Smckusick 
160745748Smckusick 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
160845748Smckusick 		return(FALSE);
160945748Smckusick 	}
161045748Smckusick 
161145748Smckusick 	entry = tmp_entry;
161245748Smckusick 
161345748Smckusick 	while (start < end) {
161445748Smckusick 		if (entry == &map->header) {
161545748Smckusick 			return(FALSE);
161645748Smckusick 		}
161745748Smckusick 
161845748Smckusick 		/*
161945748Smckusick 		 *	No holes allowed!
162045748Smckusick 		 */
162145748Smckusick 
162245748Smckusick 		if (start < entry->start) {
162345748Smckusick 			return(FALSE);
162445748Smckusick 		}
162545748Smckusick 
162645748Smckusick 		/*
162745748Smckusick 		 * Check protection associated with entry.
162845748Smckusick 		 */
162945748Smckusick 
163045748Smckusick 		if ((entry->protection & protection) != protection) {
163145748Smckusick 			return(FALSE);
163245748Smckusick 		}
163345748Smckusick 
163445748Smckusick 		/* go to next entry */
163545748Smckusick 
163645748Smckusick 		start = entry->end;
163745748Smckusick 		entry = entry->next;
163845748Smckusick 	}
163945748Smckusick 	return(TRUE);
164045748Smckusick }
164145748Smckusick 
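/*
 * A minimal sketch of using vm_map_check_protection() to verify that an
 * entire range is readable before operating on it (vm_map_copy() below
 * does exactly this for its source and destination).  Names here are
 * assumed for illustration.
 */
#ifdef notdef
static int
example_check_readable(map, addr, size)
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
{
	if (!vm_map_check_protection(map, trunc_page(addr),
	    round_page(addr + size), VM_PROT_READ))
		return (KERN_PROTECTION_FAILURE);
	return (KERN_SUCCESS);
}
#endif /* notdef */
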
164245748Smckusick /*
164345748Smckusick  *	vm_map_copy_entry:
164445748Smckusick  *
164545748Smckusick  *	Copies the contents of the source entry to the destination
164645748Smckusick  *	entry.  The entries *must* be aligned properly.
164745748Smckusick  */
164868162Scgd void
164968162Scgd vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
165045748Smckusick 	vm_map_t		src_map, dst_map;
165145748Smckusick 	register vm_map_entry_t	src_entry, dst_entry;
165245748Smckusick {
165345748Smckusick 	vm_object_t	temp_object;
165445748Smckusick 
165545748Smckusick 	if (src_entry->is_sub_map || dst_entry->is_sub_map)
165645748Smckusick 		return;
165745748Smckusick 
165848383Skarels 	if (dst_entry->object.vm_object != NULL &&
165950919Smckusick 	    (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
166045748Smckusick 		printf("vm_map_copy_entry: copying over permanent data!\n");
166145748Smckusick 
166245748Smckusick 	/*
166345748Smckusick 	 *	If our destination map was wired down,
166445748Smckusick 	 *	unwire it now.
166545748Smckusick 	 */
166645748Smckusick 
166745748Smckusick 	if (dst_entry->wired_count != 0)
166845748Smckusick 		vm_map_entry_unwire(dst_map, dst_entry);
166945748Smckusick 
167045748Smckusick 	/*
167145748Smckusick 	 *	If we're dealing with a sharing map, we
167245748Smckusick 	 *	must remove the destination pages from
167345748Smckusick 	 *	all maps (since we cannot know which maps
167445748Smckusick 	 *	this sharing map belongs in).
167545748Smckusick 	 */
167645748Smckusick 
167745748Smckusick 	if (dst_map->is_main_map)
167845748Smckusick 		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
167945748Smckusick 	else
168045748Smckusick 		vm_object_pmap_remove(dst_entry->object.vm_object,
168145748Smckusick 			dst_entry->offset,
168245748Smckusick 			dst_entry->offset +
168345748Smckusick 				(dst_entry->end - dst_entry->start));
168445748Smckusick 
168545748Smckusick 	if (src_entry->wired_count == 0) {
168645748Smckusick 
168745748Smckusick 		boolean_t	src_needs_copy;
168845748Smckusick 
168945748Smckusick 		/*
169045748Smckusick 		 *	If the source entry is marked needs_copy,
169145748Smckusick 		 *	it is already write-protected.
169245748Smckusick 		 */
169345748Smckusick 		if (!src_entry->needs_copy) {
169445748Smckusick 
169545748Smckusick 			boolean_t	su;
169645748Smckusick 
169745748Smckusick 			/*
169845748Smckusick 			 *	If the source entry has only one mapping,
169945748Smckusick 			 *	we can just protect the virtual address
170045748Smckusick 			 *	range.
170145748Smckusick 			 */
170245748Smckusick 			if (!(su = src_map->is_main_map)) {
170345748Smckusick 				simple_lock(&src_map->ref_lock);
170445748Smckusick 				su = (src_map->ref_count == 1);
170545748Smckusick 				simple_unlock(&src_map->ref_lock);
170645748Smckusick 			}
170745748Smckusick 
170845748Smckusick 			if (su) {
170945748Smckusick 				pmap_protect(src_map->pmap,
171045748Smckusick 					src_entry->start,
171145748Smckusick 					src_entry->end,
171245748Smckusick 					src_entry->protection & ~VM_PROT_WRITE);
171345748Smckusick 			}
171445748Smckusick 			else {
171545748Smckusick 				vm_object_pmap_copy(src_entry->object.vm_object,
171645748Smckusick 					src_entry->offset,
171745748Smckusick 					src_entry->offset + (src_entry->end
171845748Smckusick 							    -src_entry->start));
171945748Smckusick 			}
172045748Smckusick 		}
172145748Smckusick 
172245748Smckusick 		/*
172345748Smckusick 		 *	Make a copy of the object.
172445748Smckusick 		 */
172545748Smckusick 		temp_object = dst_entry->object.vm_object;
172645748Smckusick 		vm_object_copy(src_entry->object.vm_object,
172745748Smckusick 				src_entry->offset,
172845748Smckusick 				(vm_size_t)(src_entry->end -
172945748Smckusick 					    src_entry->start),
173045748Smckusick 				&dst_entry->object.vm_object,
173145748Smckusick 				&dst_entry->offset,
173245748Smckusick 				&src_needs_copy);
173345748Smckusick 		/*
173445748Smckusick 		 *	If we didn't get a copy-object now, mark the
173545748Smckusick 		 *	source map entry so that a shadow will be created
173645748Smckusick 		 *	to hold its changed pages.
173745748Smckusick 		 */
173845748Smckusick 		if (src_needs_copy)
173945748Smckusick 			src_entry->needs_copy = TRUE;
174045748Smckusick 
174145748Smckusick 		/*
174245748Smckusick 		 *	The destination always needs to have a shadow
174345748Smckusick 		 *	created.
174445748Smckusick 		 */
174545748Smckusick 		dst_entry->needs_copy = TRUE;
174645748Smckusick 
174745748Smckusick 		/*
174845748Smckusick 		 *	Mark the entries copy-on-write, so that write-enabling
174945748Smckusick 		 *	the entry won't make copy-on-write pages writable.
175045748Smckusick 		 */
175145748Smckusick 		src_entry->copy_on_write = TRUE;
175245748Smckusick 		dst_entry->copy_on_write = TRUE;
175345748Smckusick 		/*
175445748Smckusick 		 *	Get rid of the old object.
175545748Smckusick 		 */
175645748Smckusick 		vm_object_deallocate(temp_object);
175745748Smckusick 
175845748Smckusick 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
175945748Smckusick 			dst_entry->end - dst_entry->start, src_entry->start);
176045748Smckusick 	}
176145748Smckusick 	else {
176245748Smckusick 		/*
176345748Smckusick 		 *	Wired-down pages can't be set copy-on-write, so
176445748Smckusick 		 *	cause them to be copied into the new
176545748Smckusick 		 *	map by simulating faults (the new pages are
176645748Smckusick 		 *	pageable).
176745748Smckusick 		 */
176845748Smckusick 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
176945748Smckusick 	}
177045748Smckusick }
177145748Smckusick 
177245748Smckusick /*
177345748Smckusick  *	vm_map_copy:
177445748Smckusick  *
177545748Smckusick  *	Perform a virtual memory copy from the source
177645748Smckusick  *	address map/range to the destination map/range.
177745748Smckusick  *
177845748Smckusick  *	If src_destroy or dst_alloc is requested,
177945748Smckusick  *	the source and destination regions should be
178045748Smckusick  *	disjoint, not only in the top-level map, but
178145748Smckusick  *	in the sharing maps as well.  [The best way
178245748Smckusick  *	to guarantee this is to use a new intermediate
178345748Smckusick  *	map to make copies.  This also reduces map
178445748Smckusick  *	fragmentation.]
178545748Smckusick  */
178653357Sbostic int
178745748Smckusick vm_map_copy(dst_map, src_map,
178845748Smckusick 			  dst_addr, len, src_addr,
178945748Smckusick 			  dst_alloc, src_destroy)
179045748Smckusick 	vm_map_t	dst_map;
179145748Smckusick 	vm_map_t	src_map;
179245748Smckusick 	vm_offset_t	dst_addr;
179345748Smckusick 	vm_size_t	len;
179445748Smckusick 	vm_offset_t	src_addr;
179545748Smckusick 	boolean_t	dst_alloc;
179645748Smckusick 	boolean_t	src_destroy;
179745748Smckusick {
179845748Smckusick 	register
179945748Smckusick 	vm_map_entry_t	src_entry;
180045748Smckusick 	register
180145748Smckusick 	vm_map_entry_t	dst_entry;
180245748Smckusick 	vm_map_entry_t	tmp_entry;
180345748Smckusick 	vm_offset_t	src_start;
180445748Smckusick 	vm_offset_t	src_end;
180545748Smckusick 	vm_offset_t	dst_start;
180645748Smckusick 	vm_offset_t	dst_end;
180745748Smckusick 	vm_offset_t	src_clip;
180845748Smckusick 	vm_offset_t	dst_clip;
180945748Smckusick 	int		result;
181045748Smckusick 	boolean_t	old_src_destroy;
181145748Smckusick 
181245748Smckusick 	/*
181345748Smckusick 	 *	XXX While we figure out why src_destroy screws up,
181445748Smckusick 	 *	we'll do it by explicitly vm_map_delete'ing at the end.
181545748Smckusick 	 */
181645748Smckusick 
181745748Smckusick 	old_src_destroy = src_destroy;
181845748Smckusick 	src_destroy = FALSE;
181945748Smckusick 
182045748Smckusick 	/*
182145748Smckusick 	 *	Compute start and end of region in both maps
182245748Smckusick 	 */
182345748Smckusick 
182445748Smckusick 	src_start = src_addr;
182545748Smckusick 	src_end = src_start + len;
182645748Smckusick 	dst_start = dst_addr;
182745748Smckusick 	dst_end = dst_start + len;
182845748Smckusick 
182945748Smckusick 	/*
183045748Smckusick 	 *	Check that the region can exist in both source
183145748Smckusick 	 *	and destination.
183245748Smckusick 	 */
183345748Smckusick 
183445748Smckusick 	if ((dst_end < dst_start) || (src_end < src_start))
183545748Smckusick 		return(KERN_NO_SPACE);
183645748Smckusick 
183745748Smckusick 	/*
183845748Smckusick 	 *	Lock the maps in question -- we avoid deadlock
183945748Smckusick 	 *	by ordering lock acquisition by map value
184045748Smckusick 	 */
184145748Smckusick 
184245748Smckusick 	if (src_map == dst_map) {
184345748Smckusick 		vm_map_lock(src_map);
184445748Smckusick 	}
184568162Scgd 	else if ((long) src_map < (long) dst_map) {
184645748Smckusick 	 	vm_map_lock(src_map);
184745748Smckusick 		vm_map_lock(dst_map);
184845748Smckusick 	} else {
184945748Smckusick 		vm_map_lock(dst_map);
185045748Smckusick 	 	vm_map_lock(src_map);
185145748Smckusick 	}
185245748Smckusick 
185345748Smckusick 	result = KERN_SUCCESS;
185445748Smckusick 
185545748Smckusick 	/*
185645748Smckusick 	 *	Check protections... source must be completely readable and
185745748Smckusick 	 *	destination must be completely writable.  [Note that if we're
185845748Smckusick 	 *	allocating the destination region, we don't have to worry
185945748Smckusick 	 *	about protection, but instead about whether the region
186045748Smckusick 	 *	exists.]
186145748Smckusick 	 */
186245748Smckusick 
186345748Smckusick 	if (src_map->is_main_map && dst_map->is_main_map) {
186445748Smckusick 		if (!vm_map_check_protection(src_map, src_start, src_end,
186545748Smckusick 					VM_PROT_READ)) {
186645748Smckusick 			result = KERN_PROTECTION_FAILURE;
186745748Smckusick 			goto Return;
186845748Smckusick 		}
186945748Smckusick 
187045748Smckusick 		if (dst_alloc) {
187145748Smckusick 			/* XXX Consider making this a vm_map_find instead */
187248383Skarels 			if ((result = vm_map_insert(dst_map, NULL,
187345748Smckusick 					(vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
187445748Smckusick 				goto Return;
187545748Smckusick 		}
187645748Smckusick 		else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
187745748Smckusick 					VM_PROT_WRITE)) {
187845748Smckusick 			result = KERN_PROTECTION_FAILURE;
187945748Smckusick 			goto Return;
188045748Smckusick 		}
188145748Smckusick 	}
188245748Smckusick 
188345748Smckusick 	/*
188445748Smckusick 	 *	Find the start entries and clip.
188545748Smckusick 	 *
188645748Smckusick 	 *	Note that checking protection asserts that the
188745748Smckusick 	 *	lookup cannot fail.
188845748Smckusick 	 *
188945748Smckusick 	 *	Also note that we wait to do the second lookup
189045748Smckusick 	 *	until we have done the first clip, as the clip
189145748Smckusick 	 *	may affect which entry we get!
189245748Smckusick 	 */
189345748Smckusick 
189445748Smckusick 	(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
189545748Smckusick 	src_entry = tmp_entry;
189645748Smckusick 	vm_map_clip_start(src_map, src_entry, src_start);
189745748Smckusick 
189845748Smckusick 	(void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
189945748Smckusick 	dst_entry = tmp_entry;
190045748Smckusick 	vm_map_clip_start(dst_map, dst_entry, dst_start);
190145748Smckusick 
190245748Smckusick 	/*
190345748Smckusick 	 *	If both source and destination entries are the same,
190445748Smckusick 	 *	retry the first lookup, as it may have changed.
190545748Smckusick 	 */
190645748Smckusick 
190745748Smckusick 	if (src_entry == dst_entry) {
190845748Smckusick 		(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
190945748Smckusick 		src_entry = tmp_entry;
191045748Smckusick 	}
191145748Smckusick 
191245748Smckusick 	/*
191345748Smckusick 	 *	If source and destination entries are still the same,
191445748Smckusick 	 *	a null copy is being performed.
191545748Smckusick 	 */
191645748Smckusick 
191745748Smckusick 	if (src_entry == dst_entry)
191845748Smckusick 		goto Return;
191945748Smckusick 
192045748Smckusick 	/*
192145748Smckusick 	 *	Go through entries until we get to the end of the
192245748Smckusick 	 *	region.
192345748Smckusick 	 */
192445748Smckusick 
192545748Smckusick 	while (src_start < src_end) {
192645748Smckusick 		/*
192745748Smckusick 		 *	Clip the entries to the endpoint of the entire region.
192845748Smckusick 		 */
192945748Smckusick 
193045748Smckusick 		vm_map_clip_end(src_map, src_entry, src_end);
193145748Smckusick 		vm_map_clip_end(dst_map, dst_entry, dst_end);
193245748Smckusick 
193345748Smckusick 		/*
193445748Smckusick 		 *	Clip each entry to the endpoint of the other entry.
193545748Smckusick 		 */
193645748Smckusick 
193745748Smckusick 		src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
193845748Smckusick 		vm_map_clip_end(src_map, src_entry, src_clip);
193945748Smckusick 
194045748Smckusick 		dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
194145748Smckusick 		vm_map_clip_end(dst_map, dst_entry, dst_clip);
194245748Smckusick 
194345748Smckusick 		/*
194445748Smckusick 		 *	Both entries now match in size and relative endpoints.
194545748Smckusick 		 *
194645748Smckusick 		 *	If both entries refer to a VM object, we can
194745748Smckusick 		 *	deal with them now.
194845748Smckusick 		 */
194945748Smckusick 
195045748Smckusick 		if (!src_entry->is_a_map && !dst_entry->is_a_map) {
195145748Smckusick 			vm_map_copy_entry(src_map, dst_map, src_entry,
195245748Smckusick 						dst_entry);
195345748Smckusick 		}
195445748Smckusick 		else {
195545748Smckusick 			register vm_map_t	new_dst_map;
195645748Smckusick 			vm_offset_t		new_dst_start;
195745748Smckusick 			vm_size_t		new_size;
195845748Smckusick 			vm_map_t		new_src_map;
195945748Smckusick 			vm_offset_t		new_src_start;
196045748Smckusick 
196145748Smckusick 			/*
196245748Smckusick 			 *	We have to follow at least one sharing map.
196345748Smckusick 			 */
196445748Smckusick 
196545748Smckusick 			new_size = (dst_entry->end - dst_entry->start);
196645748Smckusick 
196745748Smckusick 			if (src_entry->is_a_map) {
196845748Smckusick 				new_src_map = src_entry->object.share_map;
196945748Smckusick 				new_src_start = src_entry->offset;
197045748Smckusick 			}
197145748Smckusick 			else {
197245748Smckusick 			 	new_src_map = src_map;
197345748Smckusick 				new_src_start = src_entry->start;
197468795Smckusick 				vm_map_set_recursive(&src_map->lock);
197545748Smckusick 			}
197645748Smckusick 
197745748Smckusick 			if (dst_entry->is_a_map) {
197845748Smckusick 			    	vm_offset_t	new_dst_end;
197945748Smckusick 
198045748Smckusick 				new_dst_map = dst_entry->object.share_map;
198145748Smckusick 				new_dst_start = dst_entry->offset;
198245748Smckusick 
198345748Smckusick 				/*
198445748Smckusick 				 *	Since the destination sharing entries
198545748Smckusick 				 *	will be merely deallocated, we can
198645748Smckusick 				 *	do that now, and replace the region
198745748Smckusick 				 *	with a null object.  [This prevents
198845748Smckusick 				 *	splitting the source map to match
198945748Smckusick 				 *	the form of the destination map.]
199045748Smckusick 				 *	Note that we can only do so if the
199145748Smckusick 				 *	source and destination do not overlap.
199245748Smckusick 				 */
199345748Smckusick 
199445748Smckusick 				new_dst_end = new_dst_start + new_size;
199545748Smckusick 
199645748Smckusick 				if (new_dst_map != new_src_map) {
199745748Smckusick 					vm_map_lock(new_dst_map);
199845748Smckusick 					(void) vm_map_delete(new_dst_map,
199945748Smckusick 							new_dst_start,
200045748Smckusick 							new_dst_end);
200145748Smckusick 					(void) vm_map_insert(new_dst_map,
200248383Skarels 							NULL,
200345748Smckusick 							(vm_offset_t) 0,
200445748Smckusick 							new_dst_start,
200545748Smckusick 							new_dst_end);
200645748Smckusick 					vm_map_unlock(new_dst_map);
200745748Smckusick 				}
200845748Smckusick 			}
200945748Smckusick 			else {
201045748Smckusick 			 	new_dst_map = dst_map;
201145748Smckusick 				new_dst_start = dst_entry->start;
201268795Smckusick 				vm_map_set_recursive(&dst_map->lock);
201345748Smckusick 			}
201445748Smckusick 
201545748Smckusick 			/*
201645748Smckusick 			 *	Recursively copy the sharing map.
201745748Smckusick 			 */
201845748Smckusick 
201945748Smckusick 			(void) vm_map_copy(new_dst_map, new_src_map,
202045748Smckusick 				new_dst_start, new_size, new_src_start,
202145748Smckusick 				FALSE, FALSE);
202245748Smckusick 
202345748Smckusick 			if (dst_map == new_dst_map)
202468795Smckusick 				vm_map_clear_recursive(&dst_map->lock);
202545748Smckusick 			if (src_map == new_src_map)
202668795Smckusick 				vm_map_clear_recursive(&src_map->lock);
202745748Smckusick 		}
202845748Smckusick 
202945748Smckusick 		/*
203045748Smckusick 		 *	Update variables for next pass through the loop.
203145748Smckusick 		 */
203245748Smckusick 
203345748Smckusick 		src_start = src_entry->end;
203445748Smckusick 		src_entry = src_entry->next;
203545748Smckusick 		dst_start = dst_entry->end;
203645748Smckusick 		dst_entry = dst_entry->next;
203745748Smckusick 
203845748Smckusick 		/*
203945748Smckusick 		 *	If the source is to be destroyed, here is the
204045748Smckusick 		 *	place to do it.
204145748Smckusick 		 */
204245748Smckusick 
204345748Smckusick 		if (src_destroy && src_map->is_main_map &&
204445748Smckusick 						dst_map->is_main_map)
204545748Smckusick 			vm_map_entry_delete(src_map, src_entry->prev);
204645748Smckusick 	}
204745748Smckusick 
204845748Smckusick 	/*
204945748Smckusick 	 *	Update the physical maps as appropriate
205045748Smckusick 	 */
205145748Smckusick 
205245748Smckusick 	if (src_map->is_main_map && dst_map->is_main_map) {
205345748Smckusick 		if (src_destroy)
205445748Smckusick 			pmap_remove(src_map->pmap, src_addr, src_addr + len);
205545748Smckusick 	}
205645748Smckusick 
205745748Smckusick 	/*
205845748Smckusick 	 *	Unlock the maps
205945748Smckusick 	 */
206045748Smckusick 
206145748Smckusick 	Return: ;
206245748Smckusick 
206345748Smckusick 	if (old_src_destroy)
206445748Smckusick 		vm_map_delete(src_map, src_addr, src_addr + len);
206545748Smckusick 
206645748Smckusick 	vm_map_unlock(src_map);
206745748Smckusick 	if (src_map != dst_map)
206845748Smckusick 		vm_map_unlock(dst_map);
206945748Smckusick 
207045748Smckusick 	return(result);
207145748Smckusick }
207245748Smckusick 
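/*
 * A minimal sketch of a virtual copy between two maps with
 * vm_map_copy(): the destination range is allocated by the call
 * (dst_alloc == TRUE) and the source is left intact
 * (src_destroy == FALSE).  The helper and its arguments are assumed
 * for illustration.
 */
#ifdef notdef
static int
example_virtual_copy(dst_map, src_map, dst_addr, src_addr, len)
	vm_map_t	dst_map, src_map;
	vm_offset_t	dst_addr, src_addr;
	vm_size_t	len;
{
	return (vm_map_copy(dst_map, src_map, dst_addr, len, src_addr,
	    TRUE, FALSE));
}
#endif /* notdef */
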
207345748Smckusick /*
207448383Skarels  * vmspace_fork:
207548383Skarels  * Create a new process vmspace structure and vm_map
207648383Skarels  * based on those of an existing process.  The new map
207748383Skarels  * is based on the old map, according to the inheritance
207848383Skarels  * values on the regions in that map.
207945748Smckusick  *
208048383Skarels  * The source map must not be locked.
208145748Smckusick  */
208248383Skarels struct vmspace *
208348383Skarels vmspace_fork(vm1)
208448383Skarels 	register struct vmspace *vm1;
208545748Smckusick {
208648383Skarels 	register struct vmspace *vm2;
208748383Skarels 	vm_map_t	old_map = &vm1->vm_map;
208845748Smckusick 	vm_map_t	new_map;
208945748Smckusick 	vm_map_entry_t	old_entry;
209045748Smckusick 	vm_map_entry_t	new_entry;
209145748Smckusick 	pmap_t		new_pmap;
209245748Smckusick 
209345748Smckusick 	vm_map_lock(old_map);
209445748Smckusick 
209548383Skarels 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
209648383Skarels 	    old_map->entries_pageable);
209748383Skarels 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
209848383Skarels 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
209948383Skarels 	new_pmap = &vm2->vm_pmap;		/* XXX */
210048383Skarels 	new_map = &vm2->vm_map;			/* XXX */
210145748Smckusick 
210245748Smckusick 	old_entry = old_map->header.next;
210345748Smckusick 
210445748Smckusick 	while (old_entry != &old_map->header) {
210545748Smckusick 		if (old_entry->is_sub_map)
210645748Smckusick 			panic("vmspace_fork: encountered a submap");
210745748Smckusick 
210845748Smckusick 		switch (old_entry->inheritance) {
210945748Smckusick 		case VM_INHERIT_NONE:
211045748Smckusick 			break;
211145748Smckusick 
211245748Smckusick 		case VM_INHERIT_SHARE:
211345748Smckusick 			/*
211445748Smckusick 			 *	If we don't already have a sharing map:
211545748Smckusick 			 */
211645748Smckusick 
211745748Smckusick 			if (!old_entry->is_a_map) {
211845748Smckusick 			 	vm_map_t	new_share_map;
211945748Smckusick 				vm_map_entry_t	new_share_entry;
212045748Smckusick 
212145748Smckusick 				/*
212245748Smckusick 				 *	Create a new sharing map
212345748Smckusick 				 */
212445748Smckusick 
212548383Skarels 				new_share_map = vm_map_create(NULL,
212645748Smckusick 							old_entry->start,
212745748Smckusick 							old_entry->end,
212845748Smckusick 							TRUE);
212945748Smckusick 				new_share_map->is_main_map = FALSE;
213045748Smckusick 
213145748Smckusick 				/*
213245748Smckusick 				 *	Create the only sharing entry from the
213345748Smckusick 				 *	old task map entry.
213445748Smckusick 				 */
213545748Smckusick 
213645748Smckusick 				new_share_entry =
213745748Smckusick 					vm_map_entry_create(new_share_map);
213845748Smckusick 				*new_share_entry = *old_entry;
213961005Shibler 				new_share_entry->wired_count = 0;
214045748Smckusick 
214145748Smckusick 				/*
214245748Smckusick 				 *	Insert the entry into the new sharing
214345748Smckusick 				 *	map
214445748Smckusick 				 */
214545748Smckusick 
214645748Smckusick 				vm_map_entry_link(new_share_map,
214745748Smckusick 						new_share_map->header.prev,
214845748Smckusick 						new_share_entry);
214945748Smckusick 
215045748Smckusick 				/*
215145748Smckusick 				 *	Fix up the task map entry to refer
215245748Smckusick 				 *	to the sharing map now.
215345748Smckusick 				 */
215445748Smckusick 
215545748Smckusick 				old_entry->is_a_map = TRUE;
215645748Smckusick 				old_entry->object.share_map = new_share_map;
215745748Smckusick 				old_entry->offset = old_entry->start;
215845748Smckusick 			}
215945748Smckusick 
216045748Smckusick 			/*
216145748Smckusick 			 *	Clone the entry, referencing the sharing map.
216245748Smckusick 			 */
216345748Smckusick 
216445748Smckusick 			new_entry = vm_map_entry_create(new_map);
216545748Smckusick 			*new_entry = *old_entry;
216661005Shibler 			new_entry->wired_count = 0;
216745748Smckusick 			vm_map_reference(new_entry->object.share_map);
216845748Smckusick 
216945748Smckusick 			/*
217045748Smckusick 			 *	Insert the entry into the new map -- we
217145748Smckusick 			 *	know we're inserting at the end of the new
217245748Smckusick 			 *	map.
217345748Smckusick 			 */
217445748Smckusick 
217545748Smckusick 			vm_map_entry_link(new_map, new_map->header.prev,
217645748Smckusick 						new_entry);
217745748Smckusick 
217845748Smckusick 			/*
217945748Smckusick 			 *	Update the physical map
218045748Smckusick 			 */
218145748Smckusick 
218245748Smckusick 			pmap_copy(new_map->pmap, old_map->pmap,
218345748Smckusick 				new_entry->start,
218445748Smckusick 				(old_entry->end - old_entry->start),
218545748Smckusick 				old_entry->start);
218645748Smckusick 			break;
218745748Smckusick 
218845748Smckusick 		case VM_INHERIT_COPY:
218945748Smckusick 			/*
219045748Smckusick 			 *	Clone the entry and link into the map.
219145748Smckusick 			 */
219245748Smckusick 
219345748Smckusick 			new_entry = vm_map_entry_create(new_map);
219445748Smckusick 			*new_entry = *old_entry;
219545748Smckusick 			new_entry->wired_count = 0;
219648383Skarels 			new_entry->object.vm_object = NULL;
219745748Smckusick 			new_entry->is_a_map = FALSE;
219845748Smckusick 			vm_map_entry_link(new_map, new_map->header.prev,
219945748Smckusick 							new_entry);
220045748Smckusick 			if (old_entry->is_a_map) {
220145748Smckusick 				int	check;
220245748Smckusick 
220345748Smckusick 				check = vm_map_copy(new_map,
220445748Smckusick 						old_entry->object.share_map,
220545748Smckusick 						new_entry->start,
220645748Smckusick 						(vm_size_t)(new_entry->end -
220745748Smckusick 							new_entry->start),
220845748Smckusick 						old_entry->offset,
220945748Smckusick 						FALSE, FALSE);
221045748Smckusick 				if (check != KERN_SUCCESS)
221145748Smckusick 					printf("vmspace_fork: copy in share_map region failed\n");
221245748Smckusick 			}
221345748Smckusick 			else {
221445748Smckusick 				vm_map_copy_entry(old_map, new_map, old_entry,
221545748Smckusick 						new_entry);
221645748Smckusick 			}
221745748Smckusick 			break;
221845748Smckusick 		}
221945748Smckusick 		old_entry = old_entry->next;
222045748Smckusick 	}
222145748Smckusick 
222245748Smckusick 	new_map->size = old_map->size;
222345748Smckusick 	vm_map_unlock(old_map);
222445748Smckusick 
222548383Skarels 	return(vm2);
222645748Smckusick }
222745748Smckusick 
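/*
 * A minimal sketch of how the fork path would hand a child a copy of
 * its parent's address space.  The struct proc fields and variable
 * names are assumptions, not code from this file.
 */
#ifdef notdef
static void
example_fork_vmspace(p1, p2)
	struct proc	*p1, *p2;
{
	/*
	 * The child's map is built from the parent's according to the
	 * inheritance value recorded on each entry.
	 */
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
}
#endif /* notdef */
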
222845748Smckusick /*
222945748Smckusick  *	vm_map_lookup:
223045748Smckusick  *
223145748Smckusick  *	Finds the VM object, offset, and
223245748Smckusick  *	protection for a given virtual address in the
223345748Smckusick  *	specified map, assuming a page fault of the
223445748Smckusick  *	type specified.
223545748Smckusick  *
223645748Smckusick  *	Leaves the map in question locked for read; return
223745748Smckusick  *	values are guaranteed until a vm_map_lookup_done
223845748Smckusick  *	call is performed.  Note that the map argument
223945748Smckusick  *	is in/out; the returned map must be used in
224045748Smckusick  *	the call to vm_map_lookup_done.
224145748Smckusick  *
224245748Smckusick  *	A handle (out_entry) is returned for use in
224345748Smckusick  *	vm_map_lookup_done, to make that fast.
224445748Smckusick  *
224545748Smckusick  *	If a lookup is requested with "write protection"
224645748Smckusick  *	specified, the map may be changed to perform virtual
224745748Smckusick  *	copying operations, although the data referenced will
224845748Smckusick  *	remain the same.
224945748Smckusick  */
225053357Sbostic int
225145748Smckusick vm_map_lookup(var_map, vaddr, fault_type, out_entry,
225245748Smckusick 				object, offset, out_prot, wired, single_use)
225345748Smckusick 	vm_map_t		*var_map;	/* IN/OUT */
225445748Smckusick 	register vm_offset_t	vaddr;
225545748Smckusick 	register vm_prot_t	fault_type;
225645748Smckusick 
225745748Smckusick 	vm_map_entry_t		*out_entry;	/* OUT */
225845748Smckusick 	vm_object_t		*object;	/* OUT */
225945748Smckusick 	vm_offset_t		*offset;	/* OUT */
226045748Smckusick 	vm_prot_t		*out_prot;	/* OUT */
226145748Smckusick 	boolean_t		*wired;		/* OUT */
226245748Smckusick 	boolean_t		*single_use;	/* OUT */
226345748Smckusick {
226445748Smckusick 	vm_map_t			share_map;
226545748Smckusick 	vm_offset_t			share_offset;
226645748Smckusick 	register vm_map_entry_t		entry;
226745748Smckusick 	register vm_map_t		map = *var_map;
226845748Smckusick 	register vm_prot_t		prot;
226945748Smckusick 	register boolean_t		su;
227045748Smckusick 
227145748Smckusick 	RetryLookup: ;
227245748Smckusick 
227345748Smckusick 	/*
227445748Smckusick 	 *	Lookup the faulting address.
227545748Smckusick 	 */
227645748Smckusick 
227745748Smckusick 	vm_map_lock_read(map);
227845748Smckusick 
227945748Smckusick #define	RETURN(why) \
228045748Smckusick 		{ \
228145748Smckusick 		vm_map_unlock_read(map); \
228245748Smckusick 		return(why); \
228345748Smckusick 		}
228445748Smckusick 
228545748Smckusick 	/*
228645748Smckusick 	 *	If the map has an interesting hint, try it before calling
228745748Smckusick 	 *	full blown lookup routine.
228845748Smckusick 	 */
228945748Smckusick 
229045748Smckusick 	simple_lock(&map->hint_lock);
229145748Smckusick 	entry = map->hint;
229245748Smckusick 	simple_unlock(&map->hint_lock);
229345748Smckusick 
229445748Smckusick 	*out_entry = entry;
229545748Smckusick 
229645748Smckusick 	if ((entry == &map->header) ||
229745748Smckusick 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
229845748Smckusick 		vm_map_entry_t	tmp_entry;
229945748Smckusick 
230045748Smckusick 		/*
230145748Smckusick 		 *	Entry was either not a valid hint, or the vaddr
230245748Smckusick 		 *	was not contained in the entry, so do a full lookup.
230345748Smckusick 		 */
230445748Smckusick 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
230545748Smckusick 			RETURN(KERN_INVALID_ADDRESS);
230645748Smckusick 
230745748Smckusick 		entry = tmp_entry;
230845748Smckusick 		*out_entry = entry;
230945748Smckusick 	}
231045748Smckusick 
231145748Smckusick 	/*
231245748Smckusick 	 *	Handle submaps.
231345748Smckusick 	 */
231445748Smckusick 
231545748Smckusick 	if (entry->is_sub_map) {
231645748Smckusick 		vm_map_t	old_map = map;
231745748Smckusick 
231845748Smckusick 		*var_map = map = entry->object.sub_map;
231945748Smckusick 		vm_map_unlock_read(old_map);
232045748Smckusick 		goto RetryLookup;
232145748Smckusick 	}
232245748Smckusick 
232345748Smckusick 	/*
232445748Smckusick 	 *	Check whether this task is allowed to have
232545748Smckusick 	 *	this page.
232645748Smckusick 	 */
232745748Smckusick 
232845748Smckusick 	prot = entry->protection;
232945748Smckusick 	if ((fault_type & (prot)) != fault_type)
233045748Smckusick 		RETURN(KERN_PROTECTION_FAILURE);
233145748Smckusick 
233245748Smckusick 	/*
233345748Smckusick 	 *	If this page is not pageable, we have to get
233445748Smckusick 	 *	it for all possible accesses.
233545748Smckusick 	 */
233645748Smckusick 
233745748Smckusick 	if (*wired = (entry->wired_count != 0))
233845748Smckusick 		prot = fault_type = entry->protection;
233945748Smckusick 
234045748Smckusick 	/*
234145748Smckusick 	 *	If we don't already have a VM object, track
234245748Smckusick 	 *	it down.
234345748Smckusick 	 */
234445748Smckusick 
234545748Smckusick 	if (su = !entry->is_a_map) {
234645748Smckusick 	 	share_map = map;
234745748Smckusick 		share_offset = vaddr;
234845748Smckusick 	}
234945748Smckusick 	else {
235045748Smckusick 		vm_map_entry_t	share_entry;
235145748Smckusick 
235245748Smckusick 		/*
235345748Smckusick 		 *	Compute the sharing map, and offset into it.
235445748Smckusick 		 */
235545748Smckusick 
235645748Smckusick 		share_map = entry->object.share_map;
235745748Smckusick 		share_offset = (vaddr - entry->start) + entry->offset;
235845748Smckusick 
235945748Smckusick 		/*
236045748Smckusick 		 *	Look for the backing store object and offset
236145748Smckusick 		 */
236245748Smckusick 
236345748Smckusick 		vm_map_lock_read(share_map);
236445748Smckusick 
236545748Smckusick 		if (!vm_map_lookup_entry(share_map, share_offset,
236645748Smckusick 					&share_entry)) {
236745748Smckusick 			vm_map_unlock_read(share_map);
236845748Smckusick 			RETURN(KERN_INVALID_ADDRESS);
236945748Smckusick 		}
237045748Smckusick 		entry = share_entry;
237145748Smckusick 	}
237245748Smckusick 
237345748Smckusick 	/*
237445748Smckusick 	 *	If the entry was copy-on-write, we either copy it now or demote the allowed access.
237545748Smckusick 	 */
237645748Smckusick 
237745748Smckusick 	if (entry->needs_copy) {
237845748Smckusick 	    	/*
237945748Smckusick 		 *	If we want to write the page, we may as well
238045748Smckusick 		 *	handle that now since we've got the sharing
238145748Smckusick 		 *	map locked.
238245748Smckusick 		 *
238345748Smckusick 		 *	If we don't need to write the page, we just
238445748Smckusick 		 *	demote the permissions allowed.
238545748Smckusick 		 */
238645748Smckusick 
238745748Smckusick 		if (fault_type & VM_PROT_WRITE) {
238845748Smckusick 			/*
238945748Smckusick 			 *	Make a new object, and place it in the
239045748Smckusick 			 *	object chain.  Note that no new references
239145748Smckusick 			 *	have appeared -- one just moved from the
239245748Smckusick 			 *	share map to the new object.
239345748Smckusick 			 */
239445748Smckusick 
2395*68938Smckusick 			if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
2396*68938Smckusick 				    (void *)0, LOCKPID)) {
239745748Smckusick 				if (share_map != map)
239845748Smckusick 					vm_map_unlock_read(map);
239945748Smckusick 				goto RetryLookup;
240045748Smckusick 			}
240145748Smckusick 
240245748Smckusick 			vm_object_shadow(
240345748Smckusick 				&entry->object.vm_object,
240445748Smckusick 				&entry->offset,
240545748Smckusick 				(vm_size_t) (entry->end - entry->start));
240645748Smckusick 
240745748Smckusick 			entry->needs_copy = FALSE;
240845748Smckusick 
2409*68938Smckusick 			lockmgr(&share_map->lock, LK_DOWNGRADE,
2410*68938Smckusick 				(void *)0, LOCKPID);
241145748Smckusick 		}
241245748Smckusick 		else {
241345748Smckusick 			/*
241445748Smckusick 			 *	We're attempting to read a copy-on-write
241545748Smckusick 			 *	page -- don't allow writes.
241645748Smckusick 			 */
241745748Smckusick 
241845748Smckusick 			prot &= (~VM_PROT_WRITE);
241945748Smckusick 		}
242045748Smckusick 	}
242145748Smckusick 
242245748Smckusick 	/*
242345748Smckusick 	 *	Create an object if necessary.
242445748Smckusick 	 */
242548383Skarels 	if (entry->object.vm_object == NULL) {
242645748Smckusick 
2427*68938Smckusick 		if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
2428*68938Smckusick 				(void *)0, LOCKPID)) {
242945748Smckusick 			if (share_map != map)
243045748Smckusick 				vm_map_unlock_read(map);
243145748Smckusick 			goto RetryLookup;
243245748Smckusick 		}
243345748Smckusick 
243445748Smckusick 		entry->object.vm_object = vm_object_allocate(
243545748Smckusick 					(vm_size_t)(entry->end - entry->start));
243645748Smckusick 		entry->offset = 0;
2437*68938Smckusick 		lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, LOCKPID);
243845748Smckusick 	}
243945748Smckusick 
244045748Smckusick 	/*
244145748Smckusick 	 *	Return the object/offset from this entry.  If the entry
244245748Smckusick 	 *	was copy-on-write or empty, it has been fixed up.
244345748Smckusick 	 */
244445748Smckusick 
244545748Smckusick 	*offset = (share_offset - entry->start) + entry->offset;
244645748Smckusick 	*object = entry->object.vm_object;
244745748Smckusick 
244845748Smckusick 	/*
244945748Smckusick 	 *	Return whether this is the only map sharing this data.
245045748Smckusick 	 */
245145748Smckusick 
245245748Smckusick 	if (!su) {
245345748Smckusick 		simple_lock(&share_map->ref_lock);
245445748Smckusick 		su = (share_map->ref_count == 1);
245545748Smckusick 		simple_unlock(&share_map->ref_lock);
245645748Smckusick 	}
245745748Smckusick 
245845748Smckusick 	*out_prot = prot;
245945748Smckusick 	*single_use = su;
246045748Smckusick 
246145748Smckusick 	return(KERN_SUCCESS);
246245748Smckusick 
246345748Smckusick #undef	RETURN
246445748Smckusick }
246545748Smckusick 
246645748Smckusick /*
246745748Smckusick  *	vm_map_lookup_done:
246845748Smckusick  *
246945748Smckusick  *	Releases locks acquired by a vm_map_lookup
247045748Smckusick  *	(according to the handle returned by that lookup).
247145748Smckusick  */
247245748Smckusick 
247368162Scgd void
247468162Scgd vm_map_lookup_done(map, entry)
247545748Smckusick 	register vm_map_t	map;
247645748Smckusick 	vm_map_entry_t		entry;
247745748Smckusick {
247845748Smckusick 	/*
247945748Smckusick 	 *	If this entry references a map, unlock it first.
248045748Smckusick 	 */
248145748Smckusick 
248245748Smckusick 	if (entry->is_a_map)
248345748Smckusick 		vm_map_unlock_read(entry->object.share_map);
248445748Smckusick 
248545748Smckusick 	/*
248645748Smckusick 	 *	Unlock the main-level map
248745748Smckusick 	 */
248845748Smckusick 
248945748Smckusick 	vm_map_unlock_read(map);
249045748Smckusick }
249145748Smckusick 
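/*
 * A minimal sketch of the vm_map_lookup()/vm_map_lookup_done() pairing
 * as a fault handler might use it.  Note that the map argument is
 * in/out: the map actually locked (and later passed to
 * vm_map_lookup_done) may differ from the one passed in.  Names here
 * are assumed for illustration.
 */
#ifdef notdef
static int
example_resolve_fault(map, va)
	vm_map_t	map;
	vm_offset_t	va;
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_prot_t	prot;
	boolean_t	wired, su;
	int		rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry,
	    &object, &offset, &prot, &wired, &su);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... bring the page in from (object, offset) ... */
	vm_map_lookup_done(map, entry);		/* releases the read lock(s) */
	return (KERN_SUCCESS);
}
#endif /* notdef */
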
249245748Smckusick /*
249345748Smckusick  *	Routine:	vm_map_simplify
249445748Smckusick  *	Purpose:
249545748Smckusick  *		Attempt to simplify the map representation in
249645748Smckusick  *		the vicinity of the given starting address.
249745748Smckusick  *	Note:
249845748Smckusick  *		This routine is intended primarily to keep the
249945748Smckusick  *		kernel maps more compact -- they generally don't
250045748Smckusick  *		benefit from the "expand a map entry" technology
250145748Smckusick  *		at allocation time because the adjacent entry
250245748Smckusick  *		is often wired down.
250345748Smckusick  */
250468162Scgd void
250568162Scgd vm_map_simplify(map, start)
250645748Smckusick 	vm_map_t	map;
250745748Smckusick 	vm_offset_t	start;
250845748Smckusick {
250945748Smckusick 	vm_map_entry_t	this_entry;
251045748Smckusick 	vm_map_entry_t	prev_entry;
251145748Smckusick 
251245748Smckusick 	vm_map_lock(map);
251345748Smckusick 	if (
251445748Smckusick 		(vm_map_lookup_entry(map, start, &this_entry)) &&
251545748Smckusick 		((prev_entry = this_entry->prev) != &map->header) &&
251645748Smckusick 
251745748Smckusick 		(prev_entry->end == start) &&
251845748Smckusick 		(map->is_main_map) &&
251945748Smckusick 
252045748Smckusick 		(prev_entry->is_a_map == FALSE) &&
252145748Smckusick 		(prev_entry->is_sub_map == FALSE) &&
252245748Smckusick 
252345748Smckusick 		(this_entry->is_a_map == FALSE) &&
252445748Smckusick 		(this_entry->is_sub_map == FALSE) &&
252545748Smckusick 
252645748Smckusick 		(prev_entry->inheritance == this_entry->inheritance) &&
252745748Smckusick 		(prev_entry->protection == this_entry->protection) &&
252845748Smckusick 		(prev_entry->max_protection == this_entry->max_protection) &&
252945748Smckusick 		(prev_entry->wired_count == this_entry->wired_count) &&
253045748Smckusick 
253145748Smckusick 		(prev_entry->copy_on_write == this_entry->copy_on_write) &&
253245748Smckusick 		(prev_entry->needs_copy == this_entry->needs_copy) &&
253345748Smckusick 
253445748Smckusick 		(prev_entry->object.vm_object == this_entry->object.vm_object) &&
253545748Smckusick 		((prev_entry->offset + (prev_entry->end - prev_entry->start))
253645748Smckusick 		     == this_entry->offset)
253745748Smckusick 	) {
253845748Smckusick 		if (map->first_free == this_entry)
253945748Smckusick 			map->first_free = prev_entry;
254045748Smckusick 
254145748Smckusick 		SAVE_HINT(map, prev_entry);
254245748Smckusick 		vm_map_entry_unlink(map, this_entry);
254345748Smckusick 		prev_entry->end = this_entry->end;
254445748Smckusick 	 	vm_object_deallocate(this_entry->object.vm_object);
254545748Smckusick 		vm_map_entry_dispose(map, this_entry);
254645748Smckusick 	}
254745748Smckusick 	vm_map_unlock(map);
254845748Smckusick }
254945748Smckusick 
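/*
 * A minimal sketch: a kernel allocator might follow a vm_map_insert()
 * of a new range with
 *
 *	vm_map_simplify(kernel_map, addr);
 *
 * so the new entry can be coalesced with a compatible predecessor.
 * The call site shown is an assumption for illustration.
 */
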
255045748Smckusick /*
255145748Smckusick  *	vm_map_print:	[ debug ]
255245748Smckusick  */
255368162Scgd void
255468162Scgd vm_map_print(map, full)
255545748Smckusick 	register vm_map_t	map;
255645748Smckusick 	boolean_t		full;
255745748Smckusick {
255845748Smckusick 	register vm_map_entry_t	entry;
255945748Smckusick 	extern int indent;
256045748Smckusick 
256145748Smckusick 	iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
256245748Smckusick 		(map->is_main_map ? "Task" : "Share"),
256345748Smckusick  		(int) map, (int) (map->pmap), map->ref_count, map->nentries,
256445748Smckusick 		map->timestamp);
256545748Smckusick 
256645748Smckusick 	if (!full && indent)
256745748Smckusick 		return;
256845748Smckusick 
256945748Smckusick 	indent += 2;
257045748Smckusick 	for (entry = map->header.next; entry != &map->header;
257145748Smckusick 				entry = entry->next) {
257245748Smckusick 		iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
257345748Smckusick 			(int) entry, (int) entry->start, (int) entry->end);
257445748Smckusick 		if (map->is_main_map) {
257545748Smckusick 		     	static char *inheritance_name[4] =
257645748Smckusick 				{ "share", "copy", "none", "donate_copy"};
257745748Smckusick 			printf("prot=%x/%x/%s, ",
257845748Smckusick 				entry->protection,
257945748Smckusick 				entry->max_protection,
258045748Smckusick 				inheritance_name[entry->inheritance]);
258145748Smckusick 			if (entry->wired_count != 0)
258245748Smckusick 				printf("wired, ");
258345748Smckusick 		}
258445748Smckusick 
258545748Smckusick 		if (entry->is_a_map || entry->is_sub_map) {
258645748Smckusick 		 	printf("share=0x%x, offset=0x%x\n",
258745748Smckusick 				(int) entry->object.share_map,
258845748Smckusick 				(int) entry->offset);
258945748Smckusick 			if ((entry->prev == &map->header) ||
259045748Smckusick 			    (!entry->prev->is_a_map) ||
259145748Smckusick 			    (entry->prev->object.share_map !=
259245748Smckusick 			     entry->object.share_map)) {
259345748Smckusick 				indent += 2;
259445748Smckusick 				vm_map_print(entry->object.share_map, full);
259545748Smckusick 				indent -= 2;
259645748Smckusick 			}
259745748Smckusick 
259845748Smckusick 		}
259945748Smckusick 		else {
260045748Smckusick 			printf("object=0x%x, offset=0x%x",
260145748Smckusick 				(int) entry->object.vm_object,
260245748Smckusick 				(int) entry->offset);
260345748Smckusick 			if (entry->copy_on_write)
260445748Smckusick 				printf(", copy (%s)",
260545748Smckusick 				       entry->needs_copy ? "needed" : "done");
260645748Smckusick 			printf("\n");
260745748Smckusick 
260845748Smckusick 			if ((entry->prev == &map->header) ||
260945748Smckusick 			    (entry->prev->is_a_map) ||
261045748Smckusick 			    (entry->prev->object.vm_object !=
261145748Smckusick 			     entry->object.vm_object)) {
261245748Smckusick 				indent += 2;
261345748Smckusick 				vm_object_print(entry->object.vm_object, full);
261445748Smckusick 				indent -= 2;
261545748Smckusick 			}
261645748Smckusick 		}
261745748Smckusick 	}
261845748Smckusick 	indent -= 2;
261945748Smckusick }
2620