/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_map.c	7.2 (Berkeley) 04/20/91
 */

/*
 *	Virtual memory mapping module.
 */

#include "param.h"
#include "malloc.h"
#include "vm.h"
#include "vm_page.h"
#include "vm_object.h"

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	In order to properly represent the sharing of virtual
 *	memory regions among maps, the map structure is bi-level.
 *	Top-level ("address") maps refer to regions of sharable
 *	virtual memory.  These regions are implemented as
 *	("sharing") maps, which then refer to the actual virtual
 *	memory objects.  When two address maps "share" memory,
 *	their top-level maps both have references to the same
 *	sharing map.  When memory is virtual-copied from one
 *	address map to another, the references in the sharing
 *	maps are actually copied -- no copying occurs at the
 *	virtual memory object level.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one sharing map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map -- this means that
 *	shadow object creation can be delayed until a write operation
 *	occurs.
 */
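
/*
 * Illustrative sketch: the entry list described above is circular and
 * doubly-linked, with &map->header serving as a sentinel.  A read-only
 * walk of a locked map might look like this hypothetical helper, which
 * is not part of this module's interface:
 *
 *	void vm_map_print_ranges(map)
 *		vm_map_t	map;
 *	{
 *		register vm_map_entry_t	ep;
 *
 *		for (ep = map->header.next; ep != &map->header;
 *		    ep = ep->next)
 *			printf("[0x%x, 0x%x)\n", ep->start, ep->end);
 *	}
 */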

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
vm_map_entry_t	kentry_free;
vm_map_t	kmap_free;

void vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries
	 * with the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (--vm->vm_refcnt == 0) {
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold,
		 * then call the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	}
}
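
/*
 * Usage sketch (illustrative only): a typical caller pairs the two
 * routines above, assuming VM_MIN_ADDRESS and VM_MAX_ADDRESS bound the
 * user address space on this machine:
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAX_ADDRESS, 1);
 *	(populate vm->vm_map with vm_map_find/vm_map_insert)
 *	vmspace_free(vm);	(tears everything down once vm_refcnt
 *				 drops to zero)
 */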

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t vm_map_create(pmap, min, max, pageable)
	pmap_t		pmap;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	register vm_map_t	result;
	extern vm_map_t		kernel_map, kmem_map;

	if (kmem_map == NULL) {
		result = kmap_free;
		kmap_free = (vm_map_t) result->header.next;
		if (result == NULL)
			panic("vm_map_create: out of maps");
	} else
		MALLOC(result, vm_map_t, sizeof(struct vm_map),
		       M_VMMAP, M_WAITOK);

	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return(result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lock_init(&map->lock, TRUE);
	simple_lock_init(&map->ref_lock);
	simple_lock_init(&map->hint_lock);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
vm_map_entry_t vm_map_entry_create(map)
	vm_map_t	map;
{
	vm_map_entry_t	entry;
	extern vm_map_t		kernel_map, kmem_map, mb_map;

	if (map == kernel_map || map == kmem_map || map == mb_map) {
		if (entry = kentry_free)
			kentry_free = kentry_free->next;
	} else
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		       M_VMMAPENT, M_WAITOK);
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
void vm_map_entry_dispose(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
	extern vm_map_t		kernel_map, kmem_map, mb_map;

	if (map == kernel_map || map == kmem_map || map == mb_map) {
		entry->next = kentry_free;
		kentry_free = entry;
	} else
		FREE(entry, M_VMMAPENT);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
#define	vm_map_entry_link(map, after_where, entry) \
		{ \
		(map)->nentries++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
		}
#define	vm_map_entry_unlink(map, entry) \
		{ \
		(map)->nentries--; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
		}

/*
 *	vm_map_reference:
 *
 *	Creates another valid reference to the given map.
 */
void vm_map_reference(map)
	register vm_map_t	map;
{
	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	map->ref_count++;
	simple_unlock(&map->ref_lock);
}

/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void vm_map_deallocate(map)
	register vm_map_t	map;
{
	register int		c;

	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	c = --map->ref_count;
	simple_unlock(&map->ref_lock);

	if (c > 0) {
		return;
	}

	/*
	 *	Lock the map, to wait out all other references
	 *	to it.
	 */

	vm_map_lock(map);

	(void) vm_map_delete(map, map->min_offset, map->max_offset);

	pmap_destroy(map->pmap);

	FREE(map, M_VMMAP);
}

/*
 *	vm_map_insert:	[ internal use only ]
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 */
vm_map_insert(map, object, offset, start, end)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	start;
	vm_offset_t	end;
{
	register vm_map_entry_t		new_entry;
	register vm_map_entry_t		prev_entry;
	vm_map_entry_t			temp_entry;

	/*
	 *	Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
			(start >= end))
		return(KERN_INVALID_ADDRESS);

	/*
	 *	Find the entry prior to the proposed
	 *	starting address; if it's part of an
	 *	existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return(KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 *	Assert that the next entry doesn't overlap the
	 *	end point.
	 */

	if ((prev_entry->next != &map->header) &&
			(prev_entry->next->start < end))
		return(KERN_NO_SPACE);

	/*
	 *	See if we can avoid creating a new entry by
	 *	extending one of our neighbors.
	 */

	if (object == NULL) {
		if ((prev_entry != &map->header) &&
		    (prev_entry->end == start) &&
		    (map->is_main_map) &&
		    (prev_entry->is_a_map == FALSE) &&
		    (prev_entry->is_sub_map == FALSE) &&
		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == VM_PROT_DEFAULT) &&
		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
		    (prev_entry->wired_count == 0)) {

			if (vm_object_coalesce(prev_entry->object.vm_object,
					NULL,
					prev_entry->offset,
					(vm_offset_t) 0,
					(vm_size_t)(prev_entry->end
						     - prev_entry->start),
					(vm_size_t)(end - prev_entry->end))) {
				/*
				 *	Coalesced the two objects - can extend
				 *	the previous map entry to include the
				 *	new range.
				 */
				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				return(KERN_SUCCESS);
			}
		}
	}

	/*
	 *	Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->is_a_map = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->copy_on_write = FALSE;
	new_entry->needs_copy = FALSE;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = VM_PROT_DEFAULT;
		new_entry->max_protection = VM_PROT_DEFAULT;
		new_entry->wired_count = 0;
	}

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 *	Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	return(KERN_SUCCESS);
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		simple_lock(&(map)->hint_lock); \
		(map)->hint = (value); \
		simple_unlock(&(map)->hint_lock);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t vm_map_lookup_entry(map, address, entry)
	register vm_map_t	map;
	register vm_offset_t	address;
	vm_map_entry_t		*entry;		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return(TRUE);
		}
	} else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 */
vm_map_find(map, object, offset, addr, length, find_space)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	length;
	boolean_t	find_space;
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	int			result;

	start = *addr;

	vm_map_lock(map);

	if (find_space) {
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset) {
			vm_map_unlock(map);
			return (KERN_NO_SPACE);
		}

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		if (start == map->min_offset) {
			if ((entry = map->first_free) != &map->header)
				start = entry->end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = start + length;

			if ((end > map->max_offset) || (end < start)) {
				vm_map_unlock(map);
				return (KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->next;
			if (next == &map->header)
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->end;
		}
		*addr = start;

		SAVE_HINT(map, entry);
	}

	result = vm_map_insert(map, object, offset, start, start + length);

	vm_map_unlock(map);
	return(result);
}
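
/*
 * Usage sketch (illustrative only): allocating `size' bytes of
 * anonymous zero-fill memory anywhere in a map.  A NULL object with
 * find_space TRUE asks vm_map_find for the first fit at or above
 * *addr, and the chosen address comes back through addr:
 *
 *	vm_offset_t	addr = vm_map_min(map);
 *
 *	if (vm_map_find(map, NULL, (vm_offset_t)0, &addr, size, TRUE)
 *	    != KERN_SUCCESS)
 *		(no room: nothing was entered in the map)
 */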

/*
 *	vm_map_simplify_entry:	[ internal use only ]
 *
 *	Simplify the given map entry by:
 *		removing extra sharing maps
 *		[XXX maybe later] merging with a neighbor
 */
void vm_map_simplify_entry(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
#ifdef	lint
	map++;
#endif	lint

	/*
	 *	If this entry corresponds to a sharing map, then
	 *	see if we can remove the level of indirection.
	 *	If it's not a sharing map, then it points to
	 *	a VM object, so see if we can merge with either
	 *	of our neighbors.
	 */

	if (entry->is_sub_map)
		return;
	if (entry->is_a_map) {
#if	0
		vm_map_t	my_share_map;
		int		count;

		my_share_map = entry->object.share_map;
		simple_lock(&my_share_map->ref_lock);
		count = my_share_map->ref_count;
		simple_unlock(&my_share_map->ref_lock);

		if (count == 1) {
			/* Can move the region from
			 * entry->start to entry->end (+ entry->offset)
			 * in my_share_map into place of entry.
			 * Later.
			 */
		}
#endif	0
	} else {
		/*
		 *	Try to merge with our neighbors.
		 *
		 *	Conditions for merge are:
		 *
		 *	1.  entries are adjacent.
		 *	2.  both entries point to objects
		 *	    with null pagers.
		 *
		 * 	If a merge is possible, we replace the two
		 *	entries with a single entry, then merge
		 *	the two objects into a single object.
		 *
		 *	Now, all that is left to do is write the
		 *	code!
		 */
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void _vm_map_clip_start(map, entry, start)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	See if we can simplify this entry first
	 */

	vm_map_simplify_entry(map, entry);

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
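
/*
 * Worked example (illustrative): clipping the entry [0x2000, 0x6000)
 * carrying offset 0x1000 at start address 0x3000 leaves
 *
 *	new_entry:	[0x2000, 0x3000), offset 0x1000
 *	entry:		[0x3000, 0x6000), offset 0x2000
 *
 * so each piece's offset still locates its first page within the
 * underlying object, and `entry' now begins at the requested address.
 */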

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

void _vm_map_clip_end();
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void _vm_map_clip_end(map, entry, end)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
vm_map_submap(map, start, end, submap)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	vm_map_t		submap;
{
	vm_map_entry_t		entry;
	register int		result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    (!entry->is_a_map) &&
	    (entry->object.vm_object == NULL) &&
	    (!entry->copy_on_write)) {
		entry->is_a_map = FALSE;
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
vm_map_protect(map, start, end, new_prot, set_max)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	new_prot;
	register boolean_t	set_max;
{
	register vm_map_entry_t		current;
	vm_map_entry_t			entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	/*
	 *	Make a first pass to check for protection
	 *	violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->is_sub_map) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}

		current = current->next;
	}

	/*
	 *	Go back and fix up protections.
	 *	[Note that clipping is not necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
				(current->max_protection = new_prot) &
					old_prot;
		else
			current->protection = new_prot;

		/*
		 *	Update physical map if necessary.
		 *	Worry about copy-on-write here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

#define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define	max(a,b)	((a) > (b) ? (a) : (b))

			if (current->is_a_map) {
				vm_map_entry_t	share_entry;
				vm_offset_t	share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
						current->object.share_map,
						current->offset,
						&share_entry);
				share_end = current->offset +
					(current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
					(share_entry->start < share_end)) {

					pmap_protect(map->pmap,
						(max(share_entry->start,
							current->offset) -
							current->offset +
							current->start),
						min(share_entry->end,
							share_end) -
						current->offset +
						current->start,
						current->protection &
							MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			} else
				pmap_protect(map->pmap, current->start,
					current->end,
					current->protection & MASK(current));
#undef	max
#undef	MASK
		}
		current = current->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
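
/*
 * Usage sketch (illustrative only): write-protecting a range without
 * lowering its maximum protection:
 *
 *	if (vm_map_protect(map, start, end, VM_PROT_READ, FALSE)
 *	    != KERN_SUCCESS)
 *		(the range crosses a submap, or the request exceeds
 *		 some entry's max_protection)
 */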

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_inherit_t	new_inheritance;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t	temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register boolean_t	new_pageable;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 *	Only one pageability change may take place at one
	 *	time, since vm_fault assumes it will be called
	 *	only once for each wiring/unwiring.  Therefore, we
	 *	have to make sure we're actually changing the pageability
	 *	for the entire region.  We do so before making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;
	temp_entry = entry;

	/*
	 *	Actions are rather different for wiring and unwiring,
	 *	so we have two separate cases.
	 */

	if (new_pageable) {

		/*
		 *	Unwiring.  First ensure that the range to be
		 *	unwired is really wired down.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 *	Now decrement the wiring count for each region.
		 *	If a region becomes completely unwired,
		 *	unwire its physical pages and mappings.
		 */
		lock_set_recursive(&map->lock);

		entry = temp_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		lock_clear_recursive(&map->lock);
	} else {
		/*
		 *	Wiring.  We must do this in two passes:
		 *
		 *	1.  Holding the write lock, we increment the
		 *	    wiring count.  For any area that is not already
		 *	    wired, we create any shadow objects that need
		 *	    to be created.
		 *
		 *	2.  We downgrade to a read lock, and call
		 *	    vm_fault_wire to fault in the pages for any
		 *	    newly wired area (wired_count is 1).
		 *
		 *	Downgrading to a read lock for vm_fault_wire avoids
		 *	a possible deadlock with another thread that may have
		 *	faulted on one of the pages to be wired (it would mark
		 *	the page busy, blocking us, then in turn block on the
		 *	map lock that we hold).  Because of problems in the
		 *	recursive lock package, we cannot upgrade to a write
		 *	lock in vm_map_lookup.  Thus, any actions that require
		 *	the write lock must be done beforehand.  Because we
		 *	keep the read lock on the map, the copy-on-write status
		 *	of the entries we modify here cannot change.
		 */

		/*
		 *	Pass 1.
		 */
		entry = temp_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			if (entry->wired_count == 1) {

				/*
				 *	Perform actions of vm_map_lookup that
				 *	need the write lock on the map: create
				 *	a shadow object for a copy-on-write
				 *	region, or an object for a zero-fill
				 *	region.
				 *
				 *	We don't have to do this for entries
				 *	that point to sharing maps, because we
				 *	won't hold the lock on the sharing map.
				 */
				if (!entry->is_a_map) {
					if (entry->needs_copy &&
					    ((entry->protection &
					      VM_PROT_WRITE) != 0)) {

						vm_object_shadow(
						    &entry->object.vm_object,
						    &entry->offset,
						    (vm_size_t)(entry->end
							- entry->start));
						entry->needs_copy = FALSE;
					}
					else if (entry->object.vm_object ==
					    NULL) {
						entry->object.vm_object =
						    vm_object_allocate(
							(vm_size_t)(entry->end
							    - entry->start));
						entry->offset = (vm_offset_t)0;
					}
				}
			}

			entry = entry->next;
		}

		/*
		 *	Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel threads are well-behaved, and therefore will
		 * not do anything destructive to this region of the map
		 * while we have it unlocked.  We cannot trust user threads
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			lock_set_recursive(&map->lock);
			lock_write_to_read(&map->lock);
		}

		entry = temp_entry;
		while (entry != &map->header && entry->start < end) {
			if (entry->wired_count == 1) {
				vm_fault_wire(map, entry->start, entry->end);
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		} else {
			lock_clear_recursive(&map->lock);
		}
	}

	vm_map_unlock(map);

	return(KERN_SUCCESS);
}
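
/*
 * Usage sketch (illustrative only): wiring a user buffer down for the
 * duration of raw I/O and unwiring it afterwards, assuming the usual
 * trunc_page()/round_page() macros.  FALSE requests "not pageable"
 * (wire); TRUE makes the range pageable again:
 *
 *	vm_map_pageable(map, trunc_page(base),
 *	    round_page(base + len), FALSE);
 *	(perform the transfer)
 *	vm_map_pageable(map, trunc_page(base),
 *	    round_page(base + len), TRUE);
 */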

/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
void vm_map_entry_unwire(map, entry)
	vm_map_t		map;
	register vm_map_entry_t	entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void vm_map_entry_delete(map, entry)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 */
vm_map_delete(map, start, end)
	register vm_map_t	map;
	vm_offset_t		start;
	register vm_offset_t	end;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		first_entry;

	/*
	 *	Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 *	Fix the lookup hint now, rather than each
		 *	time through the loop.
131745748Smckusick 		 */
131845748Smckusick 
131945748Smckusick 		SAVE_HINT(map, entry->prev);
132045748Smckusick 	}
132145748Smckusick 
132245748Smckusick 	/*
132345748Smckusick 	 *	Save the free space hint
132445748Smckusick 	 */
132545748Smckusick 
132645748Smckusick 	if (map->first_free->start >= start)
132745748Smckusick 		map->first_free = entry->prev;
132845748Smckusick 
132945748Smckusick 	/*
133045748Smckusick 	 *	Step through all entries in this region
133145748Smckusick 	 */
133245748Smckusick 
133345748Smckusick 	while ((entry != &map->header) && (entry->start < end)) {
133445748Smckusick 		vm_map_entry_t		next;
133545748Smckusick 		register vm_offset_t	s, e;
133645748Smckusick 		register vm_object_t	object;
133745748Smckusick 
133845748Smckusick 		vm_map_clip_end(map, entry, end);
133945748Smckusick 
134045748Smckusick 		next = entry->next;
134145748Smckusick 		s = entry->start;
134245748Smckusick 		e = entry->end;
134345748Smckusick 
134445748Smckusick 		/*
134545748Smckusick 		 *	Unwire before removing addresses from the pmap;
134645748Smckusick 		 *	otherwise, unwiring will put the entries back in
134745748Smckusick 		 *	the pmap.
134845748Smckusick 		 */
134945748Smckusick 
135045748Smckusick 		object = entry->object.vm_object;
135145748Smckusick 		if (entry->wired_count != 0)
135245748Smckusick 			vm_map_entry_unwire(map, entry);
135345748Smckusick 
135445748Smckusick 		/*
135545748Smckusick 		 *	If this is a sharing map, we must remove
135645748Smckusick 		 *	*all* references to this data, since we can't
135745748Smckusick 		 *	find all of the physical maps which are sharing
135845748Smckusick 		 *	it.
135945748Smckusick 		 */
136045748Smckusick 
136145748Smckusick 		if (object == kernel_object || object == kmem_object)
136245748Smckusick 			vm_object_page_remove(object, entry->offset,
136345748Smckusick 					entry->offset + (e - s));
136445748Smckusick 		else if (!map->is_main_map)
136545748Smckusick 			vm_object_pmap_remove(object,
136645748Smckusick 					 entry->offset,
136745748Smckusick 					 entry->offset + (e - s));
136845748Smckusick 		else
136945748Smckusick 			pmap_remove(map->pmap, s, e);
137045748Smckusick 
137145748Smckusick 		/*
137245748Smckusick 		 *	Delete the entry (which may delete the object)
137345748Smckusick 		 *	only after removing all pmap entries pointing
137445748Smckusick 		 *	to its pages.  (Otherwise, its page frames may
137545748Smckusick 		 *	be reallocated, and any modify bits will be
137645748Smckusick 		 *	set in the wrong object!)
137745748Smckusick 		 */
137845748Smckusick 
137945748Smckusick 		vm_map_entry_delete(map, entry);
138045748Smckusick 		entry = next;
138145748Smckusick 	}
138245748Smckusick 	return(KERN_SUCCESS);
138345748Smckusick }
138445748Smckusick 
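/*
 *	A worked illustration (editorial, not from the original source):
 *	deleting [0x2000, 0x3000) from a map whose only entry covers
 *	[0x1000, 0x4000) first clips that entry at 0x2000, then at
 *	0x3000, and unlinks only the middle piece:
 *
 *		before:  [0x1000 ........................... 0x4000)
 *		clipped: [0x1000..0x2000)[0x2000..0x3000)[0x3000..0x4000)
 *		after:   [0x1000..0x2000)                [0x3000..0x4000)
 *
 *	The two surviving entries are not re-merged here; vm_map_simplify
 *	(below) can coalesce such neighbors when their objects and
 *	offsets still line up.
 */
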
138545748Smckusick /*
138645748Smckusick  *	vm_map_remove:
138745748Smckusick  *
138845748Smckusick  *	Remove the given address range from the target map.
138945748Smckusick  *	This is the exported form of vm_map_delete.
139045748Smckusick  */
139145748Smckusick vm_map_remove(map, start, end)
139245748Smckusick 	register vm_map_t	map;
139345748Smckusick 	register vm_offset_t	start;
139445748Smckusick 	register vm_offset_t	end;
139545748Smckusick {
139645748Smckusick 	register int		result;
139745748Smckusick 
139845748Smckusick 	vm_map_lock(map);
139945748Smckusick 	VM_MAP_RANGE_CHECK(map, start, end);
140045748Smckusick 	result = vm_map_delete(map, start, end);
140145748Smckusick 	vm_map_unlock(map);
140245748Smckusick 
140345748Smckusick 	return(result);
140445748Smckusick }
140545748Smckusick 
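/*
 *	Usage sketch (editorial; "addr" and "size" are assumed caller
 *	state, and trunc_page/round_page are the usual vm_param.h
 *	macros).  vm_map_remove takes the map lock itself, so the
 *	caller must not already hold it:
 */
#ifdef notdef
void
example_free_range(map, addr, size)
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
{
	/* page-align the range before deallocating it */
	(void) vm_map_remove(map, trunc_page(addr),
			round_page(addr + size));
}
#endif /* notdef */
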
140645748Smckusick /*
140745748Smckusick  *	vm_map_check_protection:
140845748Smckusick  *
140945748Smckusick  *	Assert that the target map allows the specified
141045748Smckusick  *	privilege on the entire address region given.
141145748Smckusick  *	The entire region must be allocated.
141245748Smckusick  */
141345748Smckusick boolean_t vm_map_check_protection(map, start, end, protection)
141445748Smckusick 	register vm_map_t	map;
141545748Smckusick 	register vm_offset_t	start;
141645748Smckusick 	register vm_offset_t	end;
141745748Smckusick 	register vm_prot_t	protection;
141845748Smckusick {
141945748Smckusick 	register vm_map_entry_t	entry;
142045748Smckusick 	vm_map_entry_t		tmp_entry;
142145748Smckusick 
142245748Smckusick 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
142345748Smckusick 		return(FALSE);
142445748Smckusick 	}
142545748Smckusick 
142645748Smckusick 	entry = tmp_entry;
142745748Smckusick 
142845748Smckusick 	while (start < end) {
142945748Smckusick 		if (entry == &map->header) {
143045748Smckusick 			return(FALSE);
143145748Smckusick 		}
143245748Smckusick 
143345748Smckusick 		/*
143445748Smckusick 		 *	No holes allowed!
143545748Smckusick 		 */
143645748Smckusick 
143745748Smckusick 		if (start < entry->start) {
143845748Smckusick 			return(FALSE);
143945748Smckusick 		}
144045748Smckusick 
144145748Smckusick 		/*
144245748Smckusick 		 * Check protection associated with entry.
144345748Smckusick 		 */
144445748Smckusick 
144545748Smckusick 		if ((entry->protection & protection) != protection) {
144645748Smckusick 			return(FALSE);
144745748Smckusick 		}
144845748Smckusick 
144945748Smckusick 		/* go to next entry */
145045748Smckusick 
145145748Smckusick 		start = entry->end;
145245748Smckusick 		entry = entry->next;
145345748Smckusick 	}
145445748Smckusick 	return(TRUE);
145545748Smckusick }
145645748Smckusick 
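/*
 *	Usage sketch (editorial): callers such as vm_map_copy (below)
 *	already hold the map lock and ask, for example,
 *
 *		if (!vm_map_check_protection(src_map, src_start, src_end,
 *				VM_PROT_READ))
 *			result = KERN_PROTECTION_FAILURE;
 *
 *	This routine takes no locks of its own, so the map must be
 *	locked (at least for read) by the caller.
 */
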
145745748Smckusick /*
145845748Smckusick  *	vm_map_copy_entry:
145945748Smckusick  *
146045748Smckusick  *	Copies the contents of the source entry to the destination
146145748Smckusick  *	entry.  The entries *must* be aligned properly.
146245748Smckusick  */
146345748Smckusick void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
146445748Smckusick 	vm_map_t		src_map, dst_map;
146545748Smckusick 	register vm_map_entry_t	src_entry, dst_entry;
146645748Smckusick {
146745748Smckusick 	vm_object_t	temp_object;
146845748Smckusick 
146945748Smckusick 	if (src_entry->is_sub_map || dst_entry->is_sub_map)
147045748Smckusick 		return;
147145748Smckusick 
1472*48383Skarels 	if (dst_entry->object.vm_object != NULL &&
147345748Smckusick 	    !dst_entry->object.vm_object->internal)
147445748Smckusick 		printf("vm_map_copy_entry: copying over permanent data!\n");
147545748Smckusick 
147645748Smckusick 	/*
147745748Smckusick 	 *	If our destination map was wired down,
147845748Smckusick 	 *	unwire it now.
147945748Smckusick 	 */
148045748Smckusick 
148145748Smckusick 	if (dst_entry->wired_count != 0)
148245748Smckusick 		vm_map_entry_unwire(dst_map, dst_entry);
148345748Smckusick 
148445748Smckusick 	/*
148545748Smckusick 	 *	If we're dealing with a sharing map, we
148645748Smckusick 	 *	must remove the destination pages from
148745748Smckusick 	 *	all maps (since we cannot know which maps
148845748Smckusick 	 *	this sharing map belongs in).
148945748Smckusick 	 */
149045748Smckusick 
149145748Smckusick 	if (dst_map->is_main_map)
149245748Smckusick 		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
149345748Smckusick 	else
149445748Smckusick 		vm_object_pmap_remove(dst_entry->object.vm_object,
149545748Smckusick 			dst_entry->offset,
149645748Smckusick 			dst_entry->offset +
149745748Smckusick 				(dst_entry->end - dst_entry->start));
149845748Smckusick 
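	/*
	 *	Two cases follow (editorial note): a pageable source can
	 *	be shared copy-on-write, deferring any real copying until
	 *	a write fault; a wired source cannot be write-protected,
	 *	so its pages are copied immediately by the simulated
	 *	faults in vm_fault_copy_entry below.
	 */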
149945748Smckusick 	if (src_entry->wired_count == 0) {
150045748Smckusick 
150145748Smckusick 		boolean_t	src_needs_copy;
150245748Smckusick 
150345748Smckusick 		/*
150445748Smckusick 		 *	If the source entry is marked needs_copy,
150545748Smckusick 		 *	it is already write-protected.
150645748Smckusick 		 */
150745748Smckusick 		if (!src_entry->needs_copy) {
150845748Smckusick 
150945748Smckusick 			boolean_t	su;
151045748Smckusick 
151145748Smckusick 			/*
151245748Smckusick 			 *	If the source entry has only one mapping,
151345748Smckusick 			 *	we can just protect the virtual address
151445748Smckusick 			 *	range.
151545748Smckusick 			 */
151645748Smckusick 			if (!(su = src_map->is_main_map)) {
151745748Smckusick 				simple_lock(&src_map->ref_lock);
151845748Smckusick 				su = (src_map->ref_count == 1);
151945748Smckusick 				simple_unlock(&src_map->ref_lock);
152045748Smckusick 			}
152145748Smckusick 
152245748Smckusick 			if (su) {
152345748Smckusick 				pmap_protect(src_map->pmap,
152445748Smckusick 					src_entry->start,
152545748Smckusick 					src_entry->end,
152645748Smckusick 					src_entry->protection & ~VM_PROT_WRITE);
152745748Smckusick 			}
152845748Smckusick 			else {
152945748Smckusick 				vm_object_pmap_copy(src_entry->object.vm_object,
153045748Smckusick 					src_entry->offset,
153145748Smckusick 					src_entry->offset + (src_entry->end
153245748Smckusick 							    - src_entry->start));
153345748Smckusick 			}
153445748Smckusick 		}
153545748Smckusick 
153645748Smckusick 		/*
153745748Smckusick 		 *	Make a copy of the object.
153845748Smckusick 		 */
153945748Smckusick 		temp_object = dst_entry->object.vm_object;
154045748Smckusick 		vm_object_copy(src_entry->object.vm_object,
154145748Smckusick 				src_entry->offset,
154245748Smckusick 				(vm_size_t)(src_entry->end -
154345748Smckusick 					    src_entry->start),
154445748Smckusick 				&dst_entry->object.vm_object,
154545748Smckusick 				&dst_entry->offset,
154645748Smckusick 				&src_needs_copy);
154745748Smckusick 		/*
154845748Smckusick 		 *	If we didn't get a copy-object now, mark the
154945748Smckusick 		 *	source map entry so that a shadow will be created
155045748Smckusick 		 *	to hold its changed pages.
155145748Smckusick 		 */
155245748Smckusick 		if (src_needs_copy)
155345748Smckusick 			src_entry->needs_copy = TRUE;
155445748Smckusick 
155545748Smckusick 		/*
155645748Smckusick 		 *	The destination always needs to have a shadow
155745748Smckusick 		 *	created.
155845748Smckusick 		 */
155945748Smckusick 		dst_entry->needs_copy = TRUE;
156045748Smckusick 
156145748Smckusick 		/*
156245748Smckusick 		 *	Mark the entries copy-on-write, so that write-enabling
156345748Smckusick 		 *	the entry won't make copy-on-write pages writable.
156445748Smckusick 		 */
156545748Smckusick 		src_entry->copy_on_write = TRUE;
156645748Smckusick 		dst_entry->copy_on_write = TRUE;
156745748Smckusick 		/*
156845748Smckusick 		 *	Get rid of the old object.
156945748Smckusick 		 */
157045748Smckusick 		vm_object_deallocate(temp_object);
157145748Smckusick 
157245748Smckusick 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
157345748Smckusick 			dst_entry->end - dst_entry->start, src_entry->start);
157445748Smckusick 	}
157545748Smckusick 	else {
157645748Smckusick 		/*
157745748Smckusick 		 *	Wired-down pages can't be set copy-on-write, so
157845748Smckusick 		 *	instead cause them to be copied into the new map
157945748Smckusick 		 *	by simulating faults.  (The new pages are
158045748Smckusick 		 *	pageable.)
158145748Smckusick 		 */
158245748Smckusick 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
158345748Smckusick 	}
158445748Smckusick }
158545748Smckusick 
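/*
 *	Editorial summary of the flag changes made above in the pageable
 *	case, assuming vm_object_copy reported src_needs_copy:
 *
 *		entry		needs_copy	copy_on_write
 *		src (after)	TRUE		TRUE
 *		dst (after)	TRUE		TRUE
 *
 *	Both entries end up write-protected; the first write fault on
 *	either side creates a shadow object to hold the changed pages.
 */
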
158645748Smckusick /*
158745748Smckusick  *	vm_map_copy:
158845748Smckusick  *
158945748Smckusick  *	Perform a virtual memory copy from the source
159045748Smckusick  *	address map/range to the destination map/range.
159145748Smckusick  *
159245748Smckusick  *	If src_destroy or dst_alloc is requested,
159345748Smckusick  *	the source and destination regions should be
159445748Smckusick  *	disjoint, not only in the top-level map, but
159545748Smckusick  *	in the sharing maps as well.  [The best way
159645748Smckusick  *	to guarantee this is to use a new intermediate
159745748Smckusick  *	map to make copies.  This also reduces map
159845748Smckusick  *	fragmentation.]
159945748Smckusick  */
160045748Smckusick vm_map_copy(dst_map, src_map,
160145748Smckusick 			  dst_addr, len, src_addr,
160245748Smckusick 			  dst_alloc, src_destroy)
160345748Smckusick 	vm_map_t	dst_map;
160445748Smckusick 	vm_map_t	src_map;
160545748Smckusick 	vm_offset_t	dst_addr;
160645748Smckusick 	vm_size_t	len;
160745748Smckusick 	vm_offset_t	src_addr;
160845748Smckusick 	boolean_t	dst_alloc;
160945748Smckusick 	boolean_t	src_destroy;
161045748Smckusick {
161145748Smckusick 	register
161245748Smckusick 	vm_map_entry_t	src_entry;
161345748Smckusick 	register
161445748Smckusick 	vm_map_entry_t	dst_entry;
161545748Smckusick 	vm_map_entry_t	tmp_entry;
161645748Smckusick 	vm_offset_t	src_start;
161745748Smckusick 	vm_offset_t	src_end;
161845748Smckusick 	vm_offset_t	dst_start;
161945748Smckusick 	vm_offset_t	dst_end;
162045748Smckusick 	vm_offset_t	src_clip;
162145748Smckusick 	vm_offset_t	dst_clip;
162245748Smckusick 	int		result;
162345748Smckusick 	boolean_t	old_src_destroy;
162445748Smckusick 
162545748Smckusick 	/*
162645748Smckusick 	 *	XXX While we figure out why src_destroy screws up,
162745748Smckusick 	 *	we'll do it by explicitly vm_map_delete'ing at the end.
162845748Smckusick 	 */
162945748Smckusick 
163045748Smckusick 	old_src_destroy = src_destroy;
163145748Smckusick 	src_destroy = FALSE;
163245748Smckusick 
163345748Smckusick 	/*
163445748Smckusick 	 *	Compute start and end of region in both maps
163545748Smckusick 	 */
163645748Smckusick 
163745748Smckusick 	src_start = src_addr;
163845748Smckusick 	src_end = src_start + len;
163945748Smckusick 	dst_start = dst_addr;
164045748Smckusick 	dst_end = dst_start + len;
164145748Smckusick 
164245748Smckusick 	/*
164345748Smckusick 	 *	Check that the region can exist in both source
164445748Smckusick 	 *	and destination.
164545748Smckusick 	 */
164645748Smckusick 
164745748Smckusick 	if ((dst_end < dst_start) || (src_end < src_start))
164845748Smckusick 		return(KERN_NO_SPACE);
164945748Smckusick 
165045748Smckusick 	/*
165145748Smckusick 	 *	Lock the maps in question -- we avoid deadlock
165245748Smckusick 	 *	by ordering lock acquisition by map value
165345748Smckusick 	 */
165445748Smckusick 
165545748Smckusick 	if (src_map == dst_map) {
165645748Smckusick 		vm_map_lock(src_map);
165745748Smckusick 	}
165845748Smckusick 	else if ((int) src_map < (int) dst_map) {
165945748Smckusick 	 	vm_map_lock(src_map);
166045748Smckusick 		vm_map_lock(dst_map);
166145748Smckusick 	} else {
166245748Smckusick 		vm_map_lock(dst_map);
166345748Smckusick 	 	vm_map_lock(src_map);
166445748Smckusick 	}
166545748Smckusick 
166645748Smckusick 	result = KERN_SUCCESS;
166745748Smckusick 
166845748Smckusick 	/*
166945748Smckusick 	 *	Check protections... source must be completely readable and
167045748Smckusick 	 *	destination must be completely writable.  [Note that if we're
167145748Smckusick 	 *	allocating the destination region, we don't have to worry
167245748Smckusick 	 *	about protection, but instead about whether the region
167345748Smckusick 	 *	exists.]
167445748Smckusick 	 */
167545748Smckusick 
167645748Smckusick 	if (src_map->is_main_map && dst_map->is_main_map) {
167745748Smckusick 		if (!vm_map_check_protection(src_map, src_start, src_end,
167845748Smckusick 					VM_PROT_READ)) {
167945748Smckusick 			result = KERN_PROTECTION_FAILURE;
168045748Smckusick 			goto Return;
168145748Smckusick 		}
168245748Smckusick 
168345748Smckusick 		if (dst_alloc) {
168445748Smckusick 			/* XXX Consider making this a vm_map_find instead */
1685*48383Skarels 			if ((result = vm_map_insert(dst_map, NULL,
168645748Smckusick 					(vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
168745748Smckusick 				goto Return;
168845748Smckusick 		}
168945748Smckusick 		else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
169045748Smckusick 					VM_PROT_WRITE)) {
169145748Smckusick 			result = KERN_PROTECTION_FAILURE;
169245748Smckusick 			goto Return;
169345748Smckusick 		}
169445748Smckusick 	}
169545748Smckusick 
169645748Smckusick 	/*
169745748Smckusick 	 *	Find the start entries and clip.
169845748Smckusick 	 *
169945748Smckusick 	 *	Note that checking protection asserts that the
170045748Smckusick 	 *	lookup cannot fail.
170145748Smckusick 	 *
170245748Smckusick 	 *	Also note that we wait to do the second lookup
170345748Smckusick 	 *	until we have done the first clip, as the clip
170445748Smckusick 	 *	may affect which entry we get!
170545748Smckusick 	 */
170645748Smckusick 
170745748Smckusick 	(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
170845748Smckusick 	src_entry = tmp_entry;
170945748Smckusick 	vm_map_clip_start(src_map, src_entry, src_start);
171045748Smckusick 
171145748Smckusick 	(void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
171245748Smckusick 	dst_entry = tmp_entry;
171345748Smckusick 	vm_map_clip_start(dst_map, dst_entry, dst_start);
171445748Smckusick 
171545748Smckusick 	/*
171645748Smckusick 	 *	If both source and destination entries are the same,
171745748Smckusick 	 *	retry the first lookup, as it may have changed.
171845748Smckusick 	 */
171945748Smckusick 
172045748Smckusick 	if (src_entry == dst_entry) {
172145748Smckusick 		(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
172245748Smckusick 		src_entry = tmp_entry;
172345748Smckusick 	}
172445748Smckusick 
172545748Smckusick 	/*
172645748Smckusick 	 *	If source and destination entries are still the same,
172745748Smckusick 	 *	a null copy is being performed.
172845748Smckusick 	 */
172945748Smckusick 
173045748Smckusick 	if (src_entry == dst_entry)
173145748Smckusick 		goto Return;
173245748Smckusick 
173345748Smckusick 	/*
173445748Smckusick 	 *	Go through entries until we get to the end of the
173545748Smckusick 	 *	region.
173645748Smckusick 	 */
173745748Smckusick 
173845748Smckusick 	while (src_start < src_end) {
173945748Smckusick 		/*
174045748Smckusick 		 *	Clip the entries to the endpoint of the entire region.
174145748Smckusick 		 */
174245748Smckusick 
174345748Smckusick 		vm_map_clip_end(src_map, src_entry, src_end);
174445748Smckusick 		vm_map_clip_end(dst_map, dst_entry, dst_end);
174545748Smckusick 
174645748Smckusick 		/*
174745748Smckusick 		 *	Clip each entry to the endpoint of the other entry.
174845748Smckusick 		 */
174945748Smckusick 
175045748Smckusick 		src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
175145748Smckusick 		vm_map_clip_end(src_map, src_entry, src_clip);
175245748Smckusick 
175345748Smckusick 		dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
175445748Smckusick 		vm_map_clip_end(dst_map, dst_entry, dst_clip);
175545748Smckusick 
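		/*
		 *	(Editorial example) If the source entry spans 8K
		 *	and the destination entry spans 4K, the first clip
		 *	trims the source entry to 4K; the second clip is
		 *	then a no-op, leaving the two entries congruent.
		 */
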
175645748Smckusick 		/*
175745748Smckusick 		 *	Both entries now match in size and relative endpoints.
175845748Smckusick 		 *
175945748Smckusick 		 *	If both entries refer to a VM object, we can
176045748Smckusick 		 *	deal with them now.
176145748Smckusick 		 */
176245748Smckusick 
176345748Smckusick 		if (!src_entry->is_a_map && !dst_entry->is_a_map) {
176445748Smckusick 			vm_map_copy_entry(src_map, dst_map, src_entry,
176545748Smckusick 						dst_entry);
176645748Smckusick 		}
176745748Smckusick 		else {
176845748Smckusick 			register vm_map_t	new_dst_map;
176945748Smckusick 			vm_offset_t		new_dst_start;
177045748Smckusick 			vm_size_t		new_size;
177145748Smckusick 			vm_map_t		new_src_map;
177245748Smckusick 			vm_offset_t		new_src_start;
177345748Smckusick 
177445748Smckusick 			/*
177545748Smckusick 			 *	We have to follow at least one sharing map.
177645748Smckusick 			 */
177745748Smckusick 
177845748Smckusick 			new_size = (dst_entry->end - dst_entry->start);
177945748Smckusick 
178045748Smckusick 			if (src_entry->is_a_map) {
178145748Smckusick 				new_src_map = src_entry->object.share_map;
178245748Smckusick 				new_src_start = src_entry->offset;
178345748Smckusick 			}
178445748Smckusick 			else {
178545748Smckusick 			 	new_src_map = src_map;
178645748Smckusick 				new_src_start = src_entry->start;
178745748Smckusick 				lock_set_recursive(&src_map->lock);
178845748Smckusick 			}
178945748Smckusick 
179045748Smckusick 			if (dst_entry->is_a_map) {
179145748Smckusick 			    	vm_offset_t	new_dst_end;
179245748Smckusick 
179345748Smckusick 				new_dst_map = dst_entry->object.share_map;
179445748Smckusick 				new_dst_start = dst_entry->offset;
179545748Smckusick 
179645748Smckusick 				/*
179745748Smckusick 				 *	Since the destination sharing entries
179845748Smckusick 				 *	will be merely deallocated, we can
179945748Smckusick 				 *	do that now, and replace the region
180045748Smckusick 				 *	with a null object.  [This prevents
180145748Smckusick 				 *	splitting the source map to match
180245748Smckusick 				 *	the form of the destination map.]
180345748Smckusick 				 *	Note that we can only do so if the
180445748Smckusick 				 *	source and destination do not overlap.
180545748Smckusick 				 */
180645748Smckusick 
180745748Smckusick 				new_dst_end = new_dst_start + new_size;
180845748Smckusick 
180945748Smckusick 				if (new_dst_map != new_src_map) {
181045748Smckusick 					vm_map_lock(new_dst_map);
181145748Smckusick 					(void) vm_map_delete(new_dst_map,
181245748Smckusick 							new_dst_start,
181345748Smckusick 							new_dst_end);
181445748Smckusick 					(void) vm_map_insert(new_dst_map,
1815*48383Skarels 							NULL,
181645748Smckusick 							(vm_offset_t) 0,
181745748Smckusick 							new_dst_start,
181845748Smckusick 							new_dst_end);
181945748Smckusick 					vm_map_unlock(new_dst_map);
182045748Smckusick 				}
182145748Smckusick 			}
182245748Smckusick 			else {
182345748Smckusick 			 	new_dst_map = dst_map;
182445748Smckusick 				new_dst_start = dst_entry->start;
182545748Smckusick 				lock_set_recursive(&dst_map->lock);
182645748Smckusick 			}
182745748Smckusick 
182845748Smckusick 			/*
182945748Smckusick 			 *	Recursively copy the sharing map.
183045748Smckusick 			 */
183145748Smckusick 
183245748Smckusick 			(void) vm_map_copy(new_dst_map, new_src_map,
183345748Smckusick 				new_dst_start, new_size, new_src_start,
183445748Smckusick 				FALSE, FALSE);
183545748Smckusick 
183645748Smckusick 			if (dst_map == new_dst_map)
183745748Smckusick 				lock_clear_recursive(&dst_map->lock);
183845748Smckusick 			if (src_map == new_src_map)
183945748Smckusick 				lock_clear_recursive(&src_map->lock);
184045748Smckusick 		}
184145748Smckusick 
184245748Smckusick 		/*
184345748Smckusick 		 *	Update variables for next pass through the loop.
184445748Smckusick 		 */
184545748Smckusick 
184645748Smckusick 		src_start = src_entry->end;
184745748Smckusick 		src_entry = src_entry->next;
184845748Smckusick 		dst_start = dst_entry->end;
184945748Smckusick 		dst_entry = dst_entry->next;
185045748Smckusick 
185145748Smckusick 		/*
185245748Smckusick 		 *	If the source is to be destroyed, here is the
185345748Smckusick 		 *	place to do it.
185445748Smckusick 		 */
185545748Smckusick 
185645748Smckusick 		if (src_destroy && src_map->is_main_map &&
185745748Smckusick 						dst_map->is_main_map)
185845748Smckusick 			vm_map_entry_delete(src_map, src_entry->prev);
185945748Smckusick 	}
186045748Smckusick 
186145748Smckusick 	/*
186245748Smckusick 	 *	Update the physical maps as appropriate
186345748Smckusick 	 */
186445748Smckusick 
186545748Smckusick 	if (src_map->is_main_map && dst_map->is_main_map) {
186645748Smckusick 		if (src_destroy)
186745748Smckusick 			pmap_remove(src_map->pmap, src_addr, src_addr + len);
186845748Smckusick 	}
186945748Smckusick 
187045748Smckusick 	/*
187145748Smckusick 	 *	Unlock the maps
187245748Smckusick 	 */
187345748Smckusick 
187445748Smckusick 	Return: ;
187545748Smckusick 
187645748Smckusick 	if (old_src_destroy)
187745748Smckusick 		vm_map_delete(src_map, src_addr, src_addr + len);
187845748Smckusick 
187945748Smckusick 	vm_map_unlock(src_map);
188045748Smckusick 	if (src_map != dst_map)
188145748Smckusick 		vm_map_unlock(dst_map);
188245748Smckusick 
188345748Smckusick 	return(result);
188445748Smckusick }
188545748Smckusick 
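/*
 *	Usage sketch (editorial): copying len bytes between two main
 *	maps, creating the destination region as part of the copy.
 *	TRUE is dst_alloc (allocate the destination region); FALSE is
 *	src_destroy (leave the source intact):
 *
 *		result = vm_map_copy(dst_map, src_map, dst_addr, len,
 *				src_addr, TRUE, FALSE);
 *
 *	vm_map_copy locks both maps itself, so the caller must hold
 *	neither lock on entry.
 */
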
188645748Smckusick /*
1887*48383Skarels  * vmspace_fork:
1888*48383Skarels  * Create a new process vmspace structure and vm_map
1889*48383Skarels  * based on those of an existing process.  The new map
1890*48383Skarels  * is based on the old map, according to the inheritance
1891*48383Skarels  * values on the regions in that map.
189245748Smckusick  *
1893*48383Skarels  * The source map must not be locked.
189445748Smckusick  */
1895*48383Skarels struct vmspace *
1896*48383Skarels vmspace_fork(vm1)
1897*48383Skarels 	register struct vmspace *vm1;
189845748Smckusick {
1899*48383Skarels 	register struct vmspace *vm2;
1900*48383Skarels 	vm_map_t	old_map = &vm1->vm_map;
190145748Smckusick 	vm_map_t	new_map;
190245748Smckusick 	vm_map_entry_t	old_entry;
190345748Smckusick 	vm_map_entry_t	new_entry;
190445748Smckusick 	pmap_t		new_pmap;
190545748Smckusick 
190645748Smckusick 	vm_map_lock(old_map);
190745748Smckusick 
1908*48383Skarels 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1909*48383Skarels 	    old_map->entries_pageable);
1910*48383Skarels 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1911*48383Skarels 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1912*48383Skarels 	new_pmap = &vm2->vm_pmap;		/* XXX */
1913*48383Skarels 	new_map = &vm2->vm_map;			/* XXX */
191445748Smckusick 
191545748Smckusick 	old_entry = old_map->header.next;
191645748Smckusick 
191745748Smckusick 	while (old_entry != &old_map->header) {
191845748Smckusick 		if (old_entry->is_sub_map)
191945748Smckusick 			panic("vmspace_fork: encountered a submap");
192045748Smckusick 
192145748Smckusick 		switch (old_entry->inheritance) {
192245748Smckusick 		case VM_INHERIT_NONE:
192345748Smckusick 			break;
192445748Smckusick 
192545748Smckusick 		case VM_INHERIT_SHARE:
192645748Smckusick 			/*
192745748Smckusick 			 *	If we don't already have a sharing map:
192845748Smckusick 			 */
192945748Smckusick 
193045748Smckusick 			if (!old_entry->is_a_map) {
193145748Smckusick 			 	vm_map_t	new_share_map;
193245748Smckusick 				vm_map_entry_t	new_share_entry;
193345748Smckusick 
193445748Smckusick 				/*
193545748Smckusick 				 *	Create a new sharing map
193645748Smckusick 				 */
193745748Smckusick 
1938*48383Skarels 				new_share_map = vm_map_create(NULL,
193945748Smckusick 							old_entry->start,
194045748Smckusick 							old_entry->end,
194145748Smckusick 							TRUE);
194245748Smckusick 				new_share_map->is_main_map = FALSE;
194345748Smckusick 
194445748Smckusick 				/*
194545748Smckusick 				 *	Create the only sharing entry from the
194645748Smckusick 				 *	old task map entry.
194745748Smckusick 				 */
194845748Smckusick 
194945748Smckusick 				new_share_entry =
195045748Smckusick 					vm_map_entry_create(new_share_map);
195145748Smckusick 				*new_share_entry = *old_entry;
195245748Smckusick 
195345748Smckusick 				/*
195445748Smckusick 				 *	Insert the entry into the new sharing
195545748Smckusick 				 *	map
195645748Smckusick 				 */
195745748Smckusick 
195845748Smckusick 				vm_map_entry_link(new_share_map,
195945748Smckusick 						new_share_map->header.prev,
196045748Smckusick 						new_share_entry);
196145748Smckusick 
196245748Smckusick 				/*
196345748Smckusick 				 *	Fix up the task map entry to refer
196445748Smckusick 				 *	to the sharing map now.
196545748Smckusick 				 */
196645748Smckusick 
196745748Smckusick 				old_entry->is_a_map = TRUE;
196845748Smckusick 				old_entry->object.share_map = new_share_map;
196945748Smckusick 				old_entry->offset = old_entry->start;
197045748Smckusick 			}
197145748Smckusick 
197245748Smckusick 			/*
197345748Smckusick 			 *	Clone the entry, referencing the sharing map.
197445748Smckusick 			 */
197545748Smckusick 
197645748Smckusick 			new_entry = vm_map_entry_create(new_map);
197745748Smckusick 			*new_entry = *old_entry;
197845748Smckusick 			vm_map_reference(new_entry->object.share_map);
197945748Smckusick 
198045748Smckusick 			/*
198145748Smckusick 			 *	Insert the entry into the new map -- we
198245748Smckusick 			 *	know we're inserting at the end of the new
198345748Smckusick 			 *	map.
198445748Smckusick 			 */
198545748Smckusick 
198645748Smckusick 			vm_map_entry_link(new_map, new_map->header.prev,
198745748Smckusick 						new_entry);
198845748Smckusick 
198945748Smckusick 			/*
199045748Smckusick 			 *	Update the physical map
199145748Smckusick 			 */
199245748Smckusick 
199345748Smckusick 			pmap_copy(new_map->pmap, old_map->pmap,
199445748Smckusick 				new_entry->start,
199545748Smckusick 				(old_entry->end - old_entry->start),
199645748Smckusick 				old_entry->start);
199745748Smckusick 			break;
199845748Smckusick 
199945748Smckusick 		case VM_INHERIT_COPY:
200045748Smckusick 			/*
200145748Smckusick 			 *	Clone the entry and link into the map.
200245748Smckusick 			 */
200345748Smckusick 
200445748Smckusick 			new_entry = vm_map_entry_create(new_map);
200545748Smckusick 			*new_entry = *old_entry;
200645748Smckusick 			new_entry->wired_count = 0;
2007*48383Skarels 			new_entry->object.vm_object = NULL;
200845748Smckusick 			new_entry->is_a_map = FALSE;
200945748Smckusick 			vm_map_entry_link(new_map, new_map->header.prev,
201045748Smckusick 							new_entry);
201145748Smckusick 			if (old_entry->is_a_map) {
201245748Smckusick 				int	check;
201345748Smckusick 
201445748Smckusick 				check = vm_map_copy(new_map,
201545748Smckusick 						old_entry->object.share_map,
201645748Smckusick 						new_entry->start,
201745748Smckusick 						(vm_size_t)(new_entry->end -
201845748Smckusick 							new_entry->start),
201945748Smckusick 						old_entry->offset,
202045748Smckusick 						FALSE, FALSE);
202145748Smckusick 				if (check != KERN_SUCCESS)
202245748Smckusick 					printf("vmspace_fork: copy in share_map region failed\n");
202345748Smckusick 			}
202445748Smckusick 			else {
202545748Smckusick 				vm_map_copy_entry(old_map, new_map, old_entry,
202645748Smckusick 						new_entry);
202745748Smckusick 			}
202845748Smckusick 			break;
202945748Smckusick 		}
203045748Smckusick 		old_entry = old_entry->next;
203145748Smckusick 	}
203245748Smckusick 
203345748Smckusick 	new_map->size = old_map->size;
203445748Smckusick 	vm_map_unlock(old_map);
203545748Smckusick 
2036*48383Skarels 	return(vm2);
203745748Smckusick }
203845748Smckusick 
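/*
 *	Usage sketch (editorial; the fork-time caller and its proc
 *	fields are assumptions, not taken from this file):
 *
 *		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 *
 *	The parent's map is locked and unlocked internally, which is
 *	why the source map must not be locked on entry.
 */
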
203945748Smckusick /*
204045748Smckusick  *	vm_map_lookup:
204145748Smckusick  *
204245748Smckusick  *	Finds the VM object, offset, and
204345748Smckusick  *	protection for a given virtual address in the
204445748Smckusick  *	specified map, assuming a page fault of the
204545748Smckusick  *	type specified.
204645748Smckusick  *
204745748Smckusick  *	Leaves the map in question locked for read; return
204845748Smckusick  *	values are guaranteed until a vm_map_lookup_done
204945748Smckusick  *	call is performed.  Note that the map argument
205045748Smckusick  *	is in/out; the returned map must be used in
205145748Smckusick  *	the call to vm_map_lookup_done.
205245748Smckusick  *
205345748Smckusick  *	A handle (out_entry) is returned for use in
205445748Smckusick  *	vm_map_lookup_done, to make that fast.
205545748Smckusick  *
205645748Smckusick  *	If a lookup is requested with "write protection"
205745748Smckusick  *	specified, the map may be changed to perform virtual
205845748Smckusick  *	copying operations, although the data referenced will
205945748Smckusick  *	remain the same.
206045748Smckusick  */
206145748Smckusick vm_map_lookup(var_map, vaddr, fault_type, out_entry,
206245748Smckusick 				object, offset, out_prot, wired, single_use)
206345748Smckusick 	vm_map_t		*var_map;	/* IN/OUT */
206445748Smckusick 	register vm_offset_t	vaddr;
206545748Smckusick 	register vm_prot_t	fault_type;
206645748Smckusick 
206745748Smckusick 	vm_map_entry_t		*out_entry;	/* OUT */
206845748Smckusick 	vm_object_t		*object;	/* OUT */
206945748Smckusick 	vm_offset_t		*offset;	/* OUT */
207045748Smckusick 	vm_prot_t		*out_prot;	/* OUT */
207145748Smckusick 	boolean_t		*wired;		/* OUT */
207245748Smckusick 	boolean_t		*single_use;	/* OUT */
207345748Smckusick {
207445748Smckusick 	vm_map_t			share_map;
207545748Smckusick 	vm_offset_t			share_offset;
207645748Smckusick 	register vm_map_entry_t		entry;
207745748Smckusick 	register vm_map_t		map = *var_map;
207845748Smckusick 	register vm_prot_t		prot;
207945748Smckusick 	register boolean_t		su;
208045748Smckusick 
208145748Smckusick 	RetryLookup: ;
208245748Smckusick 
208345748Smckusick 	/*
208445748Smckusick 	 *	Lookup the faulting address.
208545748Smckusick 	 */
208645748Smckusick 
208745748Smckusick 	vm_map_lock_read(map);
208845748Smckusick 
208945748Smckusick #define	RETURN(why) \
209045748Smckusick 		{ \
209145748Smckusick 		vm_map_unlock_read(map); \
209245748Smckusick 		return(why); \
209345748Smckusick 		}
209445748Smckusick 
209545748Smckusick 	/*
209645748Smckusick 	 *	If the map has an interesting hint, try it before calling
209745748Smckusick 	 *	the full-blown lookup routine.
209845748Smckusick 	 */
209945748Smckusick 
210045748Smckusick 	simple_lock(&map->hint_lock);
210145748Smckusick 	entry = map->hint;
210245748Smckusick 	simple_unlock(&map->hint_lock);
210345748Smckusick 
210445748Smckusick 	*out_entry = entry;
210545748Smckusick 
210645748Smckusick 	if ((entry == &map->header) ||
210745748Smckusick 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
210845748Smckusick 		vm_map_entry_t	tmp_entry;
210945748Smckusick 
211045748Smckusick 		/*
211145748Smckusick 		 *	Entry was either not a valid hint, or the vaddr
211245748Smckusick 		 *	was not contained in the entry, so do a full lookup.
211345748Smckusick 		 */
211445748Smckusick 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
211545748Smckusick 			RETURN(KERN_INVALID_ADDRESS);
211645748Smckusick 
211745748Smckusick 		entry = tmp_entry;
211845748Smckusick 		*out_entry = entry;
211945748Smckusick 	}
212045748Smckusick 
212145748Smckusick 	/*
212245748Smckusick 	 *	Handle submaps.
212345748Smckusick 	 */
212445748Smckusick 
212545748Smckusick 	if (entry->is_sub_map) {
212645748Smckusick 		vm_map_t	old_map = map;
212745748Smckusick 
212845748Smckusick 		*var_map = map = entry->object.sub_map;
212945748Smckusick 		vm_map_unlock_read(old_map);
213045748Smckusick 		goto RetryLookup;
213145748Smckusick 	}
213245748Smckusick 
213345748Smckusick 	/*
213445748Smckusick 	 *	Check whether this task is allowed to have
213545748Smckusick 	 *	this page.
213645748Smckusick 	 */
213745748Smckusick 
213845748Smckusick 	prot = entry->protection;
213945748Smckusick 	if ((fault_type & (prot)) != fault_type)
214045748Smckusick 		RETURN(KERN_PROTECTION_FAILURE);
214145748Smckusick 
214245748Smckusick 	/*
214345748Smckusick 	 *	If this page is not pageable, we have to get
214445748Smckusick 	 *	it for all possible accesses.
214545748Smckusick 	 */
214645748Smckusick 
214745748Smckusick 	if (*wired = (entry->wired_count != 0))
214845748Smckusick 		prot = fault_type = entry->protection;
214945748Smckusick 
215045748Smckusick 	/*
215145748Smckusick 	 *	If we don't already have a VM object, track
215245748Smckusick 	 *	it down.
215345748Smckusick 	 */
215445748Smckusick 
215545748Smckusick 	if (su = !entry->is_a_map) {
215645748Smckusick 	 	share_map = map;
215745748Smckusick 		share_offset = vaddr;
215845748Smckusick 	}
215945748Smckusick 	else {
216045748Smckusick 		vm_map_entry_t	share_entry;
216145748Smckusick 
216245748Smckusick 		/*
216345748Smckusick 		 *	Compute the sharing map, and offset into it.
216445748Smckusick 		 */
216545748Smckusick 
216645748Smckusick 		share_map = entry->object.share_map;
216745748Smckusick 		share_offset = (vaddr - entry->start) + entry->offset;
216845748Smckusick 
216945748Smckusick 		/*
217045748Smckusick 		 *	Look for the backing store object and offset
217145748Smckusick 		 */
217245748Smckusick 
217345748Smckusick 		vm_map_lock_read(share_map);
217445748Smckusick 
217545748Smckusick 		if (!vm_map_lookup_entry(share_map, share_offset,
217645748Smckusick 					&share_entry)) {
217745748Smckusick 			vm_map_unlock_read(share_map);
217845748Smckusick 			RETURN(KERN_INVALID_ADDRESS);
217945748Smckusick 		}
218045748Smckusick 		entry = share_entry;
218145748Smckusick 	}
218245748Smckusick 
218345748Smckusick 	/*
218445748Smckusick 	 *	If the entry was copy-on-write, we handle it in one of two ways:
218545748Smckusick 	 */
218645748Smckusick 
218745748Smckusick 	if (entry->needs_copy) {
218845748Smckusick 	    	/*
218945748Smckusick 		 *	If we want to write the page, we may as well
219045748Smckusick 		 *	handle that now since we've got the sharing
219145748Smckusick 		 *	map locked.
219245748Smckusick 		 *
219345748Smckusick 		 *	If we don't need to write the page, we just
219445748Smckusick 		 *	demote the permissions allowed.
219545748Smckusick 		 */
219645748Smckusick 
219745748Smckusick 		if (fault_type & VM_PROT_WRITE) {
219845748Smckusick 			/*
219945748Smckusick 			 *	Make a new object, and place it in the
220045748Smckusick 			 *	object chain.  Note that no new references
220145748Smckusick 			 *	have appeared -- one just moved from the
220245748Smckusick 			 *	share map to the new object.
220345748Smckusick 			 */
220445748Smckusick 
220545748Smckusick 			if (lock_read_to_write(&share_map->lock)) {
220645748Smckusick 				if (share_map != map)
220745748Smckusick 					vm_map_unlock_read(map);
220845748Smckusick 				goto RetryLookup;
220945748Smckusick 			}
221045748Smckusick 
221145748Smckusick 			vm_object_shadow(
221245748Smckusick 				&entry->object.vm_object,
221345748Smckusick 				&entry->offset,
221445748Smckusick 				(vm_size_t) (entry->end - entry->start));
221545748Smckusick 
221645748Smckusick 			entry->needs_copy = FALSE;
221745748Smckusick 
221845748Smckusick 			lock_write_to_read(&share_map->lock);
221945748Smckusick 		}
222045748Smckusick 		else {
222145748Smckusick 			/*
222245748Smckusick 			 *	We're attempting to read a copy-on-write
222345748Smckusick 			 *	page -- don't allow writes.
222445748Smckusick 			 */
222545748Smckusick 
222645748Smckusick 			prot &= (~VM_PROT_WRITE);
222745748Smckusick 		}
222845748Smckusick 	}
222945748Smckusick 
223045748Smckusick 	/*
223145748Smckusick 	 *	Create an object if necessary.
223245748Smckusick 	 */
2233*48383Skarels 	if (entry->object.vm_object == NULL) {
223445748Smckusick 
223545748Smckusick 		if (lock_read_to_write(&share_map->lock)) {
223645748Smckusick 			if (share_map != map)
223745748Smckusick 				vm_map_unlock_read(map);
223845748Smckusick 			goto RetryLookup;
223945748Smckusick 		}
224045748Smckusick 
224145748Smckusick 		entry->object.vm_object = vm_object_allocate(
224245748Smckusick 					(vm_size_t)(entry->end - entry->start));
224345748Smckusick 		entry->offset = 0;
224445748Smckusick 		lock_write_to_read(&share_map->lock);
224545748Smckusick 	}
224645748Smckusick 
224745748Smckusick 	/*
224845748Smckusick 	 *	Return the object/offset from this entry.  If the entry
224945748Smckusick 	 *	was copy-on-write or empty, it has been fixed up.
225045748Smckusick 	 */
225145748Smckusick 
225245748Smckusick 	*offset = (share_offset - entry->start) + entry->offset;
225345748Smckusick 	*object = entry->object.vm_object;
225445748Smckusick 
225545748Smckusick 	/*
225645748Smckusick 	 *	Return whether this is the only map sharing this data.
225745748Smckusick 	 */
225845748Smckusick 
225945748Smckusick 	if (!su) {
226045748Smckusick 		simple_lock(&share_map->ref_lock);
226145748Smckusick 		su = (share_map->ref_count == 1);
226245748Smckusick 		simple_unlock(&share_map->ref_lock);
226345748Smckusick 	}
226445748Smckusick 
226545748Smckusick 	*out_prot = prot;
226645748Smckusick 	*single_use = su;
226745748Smckusick 
226845748Smckusick 	return(KERN_SUCCESS);
226945748Smckusick 
227045748Smckusick #undef	RETURN
227145748Smckusick }
227245748Smckusick 
227345748Smckusick /*
227445748Smckusick  *	vm_map_lookup_done:
227545748Smckusick  *
227645748Smckusick  *	Releases locks acquired by a vm_map_lookup
227745748Smckusick  *	(according to the handle returned by that lookup).
227845748Smckusick  */
227945748Smckusick 
228045748Smckusick void vm_map_lookup_done(map, entry)
228145748Smckusick 	register vm_map_t	map;
228245748Smckusick 	vm_map_entry_t		entry;
228345748Smckusick {
228445748Smckusick 	/*
228545748Smckusick 	 *	If this entry references a map, unlock it first.
228645748Smckusick 	 */
228745748Smckusick 
228845748Smckusick 	if (entry->is_a_map)
228945748Smckusick 		vm_map_unlock_read(entry->object.share_map);
229045748Smckusick 
229145748Smckusick 	/*
229245748Smckusick 	 *	Unlock the main-level map
229345748Smckusick 	 */
229445748Smckusick 
229545748Smckusick 	vm_map_unlock_read(map);
229645748Smckusick }
229745748Smckusick 
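/*
 *	Protocol sketch (editorial): a fault handler pairs the two
 *	routines above roughly as follows; "vaddr" and "fault_type"
 *	are the faulting address and the access being attempted:
 */
#ifdef notdef
	vm_map_t	map = original_map;	/* may be changed by lookup */
	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_prot_t	prot;
	boolean_t	wired, su;
	int		result;

	result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&object, &offset, &prot, &wired, &su);
	if (result != KERN_SUCCESS)
		return(result);
	/* ... resolve the fault against (object, offset) ... */
	vm_map_lookup_done(map, entry);		/* note: the returned map */
#endif /* notdef */
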
229845748Smckusick /*
229945748Smckusick  *	Routine:	vm_map_simplify
230045748Smckusick  *	Purpose:
230145748Smckusick  *		Attempt to simplify the map representation in
230245748Smckusick  *		the vicinity of the given starting address.
230345748Smckusick  *	Note:
230445748Smckusick  *		This routine is intended primarily to keep the
230545748Smckusick  *		kernel maps more compact -- they generally don't
230645748Smckusick  *		benefit from the "expand a map entry" technology
230745748Smckusick  *		at allocation time because the adjacent entry
230845748Smckusick  *		is often wired down.
230945748Smckusick  */
231045748Smckusick void vm_map_simplify(map, start)
231145748Smckusick 	vm_map_t	map;
231245748Smckusick 	vm_offset_t	start;
231345748Smckusick {
231445748Smckusick 	vm_map_entry_t	this_entry;
231545748Smckusick 	vm_map_entry_t	prev_entry;
231645748Smckusick 
231745748Smckusick 	vm_map_lock(map);
231845748Smckusick 	if (
231945748Smckusick 		(vm_map_lookup_entry(map, start, &this_entry)) &&
232045748Smckusick 		((prev_entry = this_entry->prev) != &map->header) &&
232145748Smckusick 
232245748Smckusick 		(prev_entry->end == start) &&
232345748Smckusick 		(map->is_main_map) &&
232445748Smckusick 
232545748Smckusick 		(prev_entry->is_a_map == FALSE) &&
232645748Smckusick 		(prev_entry->is_sub_map == FALSE) &&
232745748Smckusick 
232845748Smckusick 		(this_entry->is_a_map == FALSE) &&
232945748Smckusick 		(this_entry->is_sub_map == FALSE) &&
233045748Smckusick 
233145748Smckusick 		(prev_entry->inheritance == this_entry->inheritance) &&
233245748Smckusick 		(prev_entry->protection == this_entry->protection) &&
233345748Smckusick 		(prev_entry->max_protection == this_entry->max_protection) &&
233445748Smckusick 		(prev_entry->wired_count == this_entry->wired_count) &&
233545748Smckusick 
233645748Smckusick 		(prev_entry->copy_on_write == this_entry->copy_on_write) &&
233745748Smckusick 		(prev_entry->needs_copy == this_entry->needs_copy) &&
233845748Smckusick 
233945748Smckusick 		(prev_entry->object.vm_object == this_entry->object.vm_object) &&
234045748Smckusick 		((prev_entry->offset + (prev_entry->end - prev_entry->start))
234145748Smckusick 		     == this_entry->offset)
234245748Smckusick 	) {
234345748Smckusick 		if (map->first_free == this_entry)
234445748Smckusick 			map->first_free = prev_entry;
234545748Smckusick 
234645748Smckusick 		SAVE_HINT(map, prev_entry);
234745748Smckusick 		vm_map_entry_unlink(map, this_entry);
234845748Smckusick 		prev_entry->end = this_entry->end;
234945748Smckusick 	 	vm_object_deallocate(this_entry->object.vm_object);
235045748Smckusick 		vm_map_entry_dispose(map, this_entry);
235145748Smckusick 	}
235245748Smckusick 	vm_map_unlock(map);
235345748Smckusick }
235445748Smckusick 
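/*
 *	A worked illustration (editorial): two abutting entries
 *
 *		prev: [0x1000, 0x2000)  object O, offset 0x0000
 *		this: [0x2000, 0x3000)  object O, offset 0x1000
 *
 *	pass the offset test above (0x0000 + 0x1000 == 0x1000), so if
 *	the protection, inheritance, and wiring tests also pass,
 *	vm_map_simplify(map, 0x2000) unlinks "this" and extends "prev"
 *	to end at 0x3000, dropping one object reference.
 */
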
235545748Smckusick /*
235645748Smckusick  *	vm_map_print:	[ debug ]
235745748Smckusick  */
235845748Smckusick void vm_map_print(map, full)
235945748Smckusick 	register vm_map_t	map;
236045748Smckusick 	boolean_t		full;
236145748Smckusick {
236245748Smckusick 	register vm_map_entry_t	entry;
236345748Smckusick 	extern int indent;
236445748Smckusick 
236545748Smckusick 	iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
236645748Smckusick 		(map->is_main_map ? "Task" : "Share"),
236745748Smckusick  		(int) map, (int) (map->pmap), map->ref_count, map->nentries,
236845748Smckusick 		map->timestamp);
236945748Smckusick 
237045748Smckusick 	if (!full && indent)
237145748Smckusick 		return;
237245748Smckusick 
237345748Smckusick 	indent += 2;
237445748Smckusick 	for (entry = map->header.next; entry != &map->header;
237545748Smckusick 				entry = entry->next) {
237645748Smckusick 		iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
237745748Smckusick 			(int) entry, (int) entry->start, (int) entry->end);
237845748Smckusick 		if (map->is_main_map) {
237945748Smckusick 		     	static char *inheritance_name[4] =
238045748Smckusick 				{ "share", "copy", "none", "donate_copy"};
238145748Smckusick 			printf("prot=%x/%x/%s, ",
238245748Smckusick 				entry->protection,
238345748Smckusick 				entry->max_protection,
238445748Smckusick 				inheritance_name[entry->inheritance]);
238545748Smckusick 			if (entry->wired_count != 0)
238645748Smckusick 				printf("wired, ");
238745748Smckusick 		}
238845748Smckusick 
238945748Smckusick 		if (entry->is_a_map || entry->is_sub_map) {
239045748Smckusick 		 	printf("share=0x%x, offset=0x%x\n",
239145748Smckusick 				(int) entry->object.share_map,
239245748Smckusick 				(int) entry->offset);
239345748Smckusick 			if ((entry->prev == &map->header) ||
239445748Smckusick 			    (!entry->prev->is_a_map) ||
239545748Smckusick 			    (entry->prev->object.share_map !=
239645748Smckusick 			     entry->object.share_map)) {
239745748Smckusick 				indent += 2;
239845748Smckusick 				vm_map_print(entry->object.share_map, full);
239945748Smckusick 				indent -= 2;
240045748Smckusick 			}
240145748Smckusick 
240245748Smckusick 		}
240345748Smckusick 		else {
240445748Smckusick 			printf("object=0x%x, offset=0x%x",
240545748Smckusick 				(int) entry->object.vm_object,
240645748Smckusick 				(int) entry->offset);
240745748Smckusick 			if (entry->copy_on_write)
240845748Smckusick 				printf(", copy (%s)",
240945748Smckusick 				       entry->needs_copy ? "needed" : "done");
241045748Smckusick 			printf("\n");
241145748Smckusick 
241245748Smckusick 			if ((entry->prev == &map->header) ||
241345748Smckusick 			    (entry->prev->is_a_map) ||
241445748Smckusick 			    (entry->prev->object.vm_object !=
241545748Smckusick 			     entry->object.vm_object)) {
241645748Smckusick 				indent += 2;
241745748Smckusick 				vm_object_print(entry->object.vm_object, full);
241845748Smckusick 				indent -= 2;
241945748Smckusick 			}
242045748Smckusick 		}
242145748Smckusick 	}
242245748Smckusick 	indent -= 2;
242345748Smckusick }