/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_map.c	8.2 (Berkeley) 01/04/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	In order to properly represent the sharing of virtual
 *	memory regions among maps, the map structure is bi-level.
 *	Top-level ("address") maps refer to regions of sharable
 *	virtual memory.  These regions are implemented as
 *	("sharing") maps, which then refer to the actual virtual
 *	memory objects.  When two address maps "share" memory,
 *	their top-level maps both have references to the same
 *	sharing map.  When memory is virtual-copied from one
 *	address map to another, the references in the sharing
 *	maps are actually copied -- no copying occurs at the
 *	virtual memory object level.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one sharing map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map -- this means that
 *	shadow object creation can be delayed until a write operation
 *	occurs.
 */
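
/*
 *	An illustrative sketch, not part of the original code: the entry
 *	list described above is a circular, doubly-linked list rooted at
 *	the sentinel &map->header and kept sorted by start address.  A
 *	walk over every entry of a locked map would look like this;
 *	print_entry() is a hypothetical helper.
 */
#if 0
void
vm_map_walk(map)
	vm_map_t	map;
{
	register vm_map_entry_t	entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		print_entry(entry);		/* hypothetical */
}
#endif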

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
vm_map_entry_t	kentry_free;
vm_map_t	kmap_free;

static void	_vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void	_vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));

void vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries
	 * with the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (--vm->vm_refcnt == 0) {
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold,
		 * then call the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	}
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t vm_map_create(pmap, min, max, pageable)
	pmap_t		pmap;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	register vm_map_t	result;
	extern vm_map_t		kmem_map;

	if (kmem_map == NULL) {
		result = kmap_free;
		if (result == NULL)
			panic("vm_map_create: out of maps");
		kmap_free = (vm_map_t) result->header.next;
	} else
		MALLOC(result, vm_map_t, sizeof(struct vm_map),
		       M_VMMAP, M_WAITOK);

	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return(result);
}
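
/*
 *	An illustrative sketch of a typical caller, not part of the
 *	original code: allocating a pageable address space covering the
 *	user address range.  VM_MIN_ADDRESS and VM_MAXUSER_ADDRESS come
 *	from the machine-dependent vmparam.h; the surrounding code is
 *	hypothetical.
 */
#if 0
	struct vmspace *vm;

	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, TRUE);
	/* ... use vm->vm_map; drop the reference when done ... */
	vmspace_free(vm);
#endif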

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lock_init(&map->lock, TRUE);
	simple_lock_init(&map->ref_lock);
	simple_lock_init(&map->hint_lock);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
vm_map_entry_t vm_map_entry_create(map)
	vm_map_t	map;
{
	vm_map_entry_t	entry;
	extern vm_map_t	kernel_map, kmem_map, mb_map, pager_map;

	if (map == kernel_map || map == kmem_map || map == mb_map ||
	    map == pager_map) {
		if (entry = kentry_free)
			kentry_free = kentry_free->next;
	} else
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		       M_VMMAPENT, M_WAITOK);
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
void vm_map_entry_dispose(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
	extern vm_map_t	kernel_map, kmem_map, mb_map, pager_map;

	if (map == kernel_map || map == kmem_map || map == mb_map ||
	    map == pager_map) {
		entry->next = kentry_free;
		kentry_free = entry;
	} else
		FREE(entry, M_VMMAPENT);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
#define	vm_map_entry_link(map, after_where, entry) \
		{ \
		(map)->nentries++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
		}
#define	vm_map_entry_unlink(map, entry) \
		{ \
		(map)->nentries--; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
		}

/*
 *	vm_map_reference:
 *
 *	Creates another valid reference to the given map.
 *
 */
void vm_map_reference(map)
	register vm_map_t	map;
{
	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	map->ref_count++;
	simple_unlock(&map->ref_lock);
}

/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void vm_map_deallocate(map)
	register vm_map_t	map;
{
	register int		c;

	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	c = --map->ref_count;
	simple_unlock(&map->ref_lock);

	if (c > 0) {
		return;
	}

	/*
	 *	Lock the map, to wait out all other references
	 *	to it.
	 */

	vm_map_lock(map);

	(void) vm_map_delete(map, map->min_offset, map->max_offset);

	pmap_destroy(map->pmap);

	FREE(map, M_VMMAP);
}

/*
 *	vm_map_insert:	[ internal use only ]
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 */
int
vm_map_insert(map, object, offset, start, end)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	start;
	vm_offset_t	end;
{
	register vm_map_entry_t		new_entry;
	register vm_map_entry_t		prev_entry;
	vm_map_entry_t			temp_entry;

	/*
	 *	Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
			(start >= end))
		return(KERN_INVALID_ADDRESS);

	/*
	 *	Find the entry prior to the proposed
	 *	starting address; if it's part of an
	 *	existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return(KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 *	Assert that the next entry doesn't overlap the
	 *	end point.
	 */

	if ((prev_entry->next != &map->header) &&
			(prev_entry->next->start < end))
		return(KERN_NO_SPACE);

	/*
	 *	See if we can avoid creating a new entry by
	 *	extending one of our neighbors.
	 */

	if (object == NULL) {
		if ((prev_entry != &map->header) &&
		    (prev_entry->end == start) &&
		    (map->is_main_map) &&
		    (prev_entry->is_a_map == FALSE) &&
		    (prev_entry->is_sub_map == FALSE) &&
		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == VM_PROT_DEFAULT) &&
		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
		    (prev_entry->wired_count == 0)) {

			if (vm_object_coalesce(prev_entry->object.vm_object,
					NULL,
					prev_entry->offset,
					(vm_offset_t) 0,
					(vm_size_t)(prev_entry->end
						     - prev_entry->start),
					(vm_size_t)(end - prev_entry->end))) {
				/*
				 *	Coalesced the two objects - can extend
				 *	the previous map entry to include the
				 *	new range.
				 */
				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				return(KERN_SUCCESS);
			}
		}
	}

	/*
	 *	Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->is_a_map = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->copy_on_write = FALSE;
	new_entry->needs_copy = FALSE;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = VM_PROT_DEFAULT;
		new_entry->max_protection = VM_PROT_DEFAULT;
		new_entry->wired_count = 0;
	}

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 *	Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	return(KERN_SUCCESS);
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		simple_lock(&(map)->hint_lock); \
		(map)->hint = (value); \
		simple_unlock(&(map)->hint_lock);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t vm_map_lookup_entry(map, address, entry)
	register vm_map_t	map;
	register vm_offset_t	address;
	vm_map_entry_t		*entry;		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	register vm_map_t map;
	register vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	register vm_map_entry_t entry, next;
	register vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;
		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 */
int
vm_map_find(map, object, offset, addr, length, find_space)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	length;
	boolean_t	find_space;
{
	register vm_offset_t	start;
	int			result;

	start = *addr;
	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset, start, start + length);
	vm_map_unlock(map);
	return (result);
}
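
/*
 *	An illustrative sketch of a vm_map_find caller, not part of the
 *	original code: reserve "size" bytes of kernel virtual space,
 *	first-fit from the bottom of the map, with no backing object yet.
 *	"size" is a hypothetical, page-aligned value.
 */
#if 0
	vm_offset_t	addr;

	addr = vm_map_min(kernel_map);
	if (vm_map_find(kernel_map, NULL, (vm_offset_t)0,
	    &addr, size, TRUE) != KERN_SUCCESS)
		return (0);
	/* addr now names a reserved range [addr, addr + size). */
#endif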

/*
 *	vm_map_simplify_entry:	[ internal use only ]
 *
 *	Simplify the given map entry by:
 *		removing extra sharing maps
 *		[XXX maybe later] merging with a neighbor
 */
void vm_map_simplify_entry(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
#ifdef	lint
	map++;
#endif

	/*
	 *	If this entry corresponds to a sharing map, then
	 *	see if we can remove the level of indirection.
	 *	If it's not a sharing map, then it points to
	 *	a VM object, so see if we can merge with either
	 *	of our neighbors.
	 */

	if (entry->is_sub_map)
		return;
	if (entry->is_a_map) {
#if	0
		vm_map_t	my_share_map;
		int		count;

		my_share_map = entry->object.share_map;
		simple_lock(&my_share_map->ref_lock);
		count = my_share_map->ref_count;
		simple_unlock(&my_share_map->ref_lock);

		if (count == 1) {
			/* Can move the region from
			 * entry->start to entry->end (+ entry->offset)
			 * in my_share_map into place of entry.
			 * Later.
			 */
		}
#endif
	}
	else {
		/*
		 *	Try to merge with our neighbors.
		 *
		 *	Conditions for merge are:
		 *
		 *	1.  entries are adjacent.
		 *	2.  both entries point to objects
		 *	    with null pagers.
		 *
		 *	If a merge is possible, we replace the two
		 *	entries with a single entry, then merge
		 *	the two objects into a single object.
		 *
		 *	Now, all that is left to do is write the
		 *	code!
		 */
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void _vm_map_clip_start(map, entry, start)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	See if we can simplify this entry first
	 */

	vm_map_simplify_entry(map, entry);

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}
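
/*
 *	An illustrative sketch of the clipping idiom, not part of the
 *	original code: given one entry covering [0x1000, 0x4000) and an
 *	operation on [0x2000, 0x3000), the two clips split it into three
 *	entries so that one entry exactly spans the operated-on range.
 *	The addresses are made up for the example.
 */
#if 0
	vm_map_clip_start(map, entry, 0x2000);
	/* entries: [0x1000,0x2000) [0x2000,0x4000); entry is the second */
	vm_map_clip_end(map, entry, 0x3000);
	/* entries: [0x1000,0x2000) [0x2000,0x3000) [0x3000,0x4000) */
#endif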

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void _vm_map_clip_end(map, entry, end)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(map, start, end, submap)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	vm_map_t		submap;
{
	vm_map_entry_t		entry;
	register int		result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    (!entry->is_a_map) &&
	    (entry->object.vm_object == NULL) &&
	    (!entry->copy_on_write)) {
		entry->is_a_map = FALSE;
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(map, start, end, new_prot, set_max)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	new_prot;
	register boolean_t	set_max;
{
	register vm_map_entry_t		current;
	vm_map_entry_t			entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->next;

	/*
	 *	Make a first pass to check for protection
	 *	violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->is_sub_map) {
			/* Unlock before returning; the original code
			 * leaked the map lock here. */
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}

		current = current->next;
	}

	/*
	 *	Go back and fix up protections.
	 *	[Note that clipping is not necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
				(current->max_protection = new_prot) &
					old_prot;
		else
			current->protection = new_prot;

		/*
		 *	Update physical map if necessary.
		 *	Worry about copy-on-write here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

#define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define	max(a,b)	((a) > (b) ? (a) : (b))

			if (current->is_a_map) {
				vm_map_entry_t	share_entry;
				vm_offset_t	share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
						current->object.share_map,
						current->offset,
						&share_entry);
				share_end = current->offset +
					(current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
					(share_entry->start < share_end)) {

					pmap_protect(map->pmap,
						(max(share_entry->start,
							current->offset) -
							current->offset +
							current->start),
						min(share_entry->end,
							share_end) -
							current->offset +
							current->start,
						current->protection &
							MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			}
			else
				pmap_protect(map->pmap, current->start,
					current->end,
					current->protection & MASK(entry));
#undef	max
#undef	MASK
		}
		current = current->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
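
/*
 *	An illustrative sketch of a vm_map_protect caller, not part of
 *	the original code: revoke write permission on a range while
 *	leaving the maximum protection alone (set_max == FALSE), so the
 *	pages can later be made writable again.
 */
#if 0
	(void) vm_map_protect(map, start, end,
	    VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
#endif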

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_inherit_t	new_inheritance;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register boolean_t	new_pageable;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		start_entry;
	register vm_offset_t	failed;
	int			rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 *	Only one pageability change may take place at one
	 *	time, since vm_fault assumes it will be called
	 *	only once for each wiring/unwiring.  Therefore, we
	 *	have to make sure we're actually changing the pageability
	 *	for the entire region.  We do so before making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 *	Actions are rather different for wiring and unwiring,
	 *	so we have two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 *	Unwiring.  First ensure that the range to be
		 *	unwired is really wired down and that there
		 *	are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
			     (entry->next == &map->header ||
			      entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 *	Now decrement the wiring count for each region.
		 *	If a region becomes completely unwired,
		 *	unwire its physical pages and mappings.
		 */
		lock_set_recursive(&map->lock);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		lock_clear_recursive(&map->lock);
	}

	else {
		/*
		 *	Wiring.  We must do this in two passes:
		 *
		 *	1.  Holding the write lock, we create any shadow
		 *	    or zero-fill objects that need to be created.
		 *	    Then we clip each map entry to the region to be
		 *	    wired and increment its wiring count.  We
		 *	    create objects before clipping the map entries
		 *	    to avoid object proliferation.
		 *
		 *	2.  We downgrade to a read lock, and call
		 *	    vm_fault_wire to fault in the pages for any
		 *	    newly wired area (wired_count is 1).
		 *
		 *	Downgrading to a read lock for vm_fault_wire avoids
		 *	a possible deadlock with another thread that may have
		 *	faulted on one of the pages to be wired (it would mark
		 *	the page busy, blocking us, then in turn block on the
		 *	map lock that we hold).  Because of problems in the
		 *	recursive lock package, we cannot upgrade to a write
		 *	lock in vm_map_lookup.  Thus, any actions that require
		 *	the write lock must be done beforehand.  Because we
		 *	keep the read lock on the map, the copy-on-write status
		 *	of the entries we modify here cannot change.
		 */

		/*
		 *	Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
#if 0
			vm_map_clip_end(map, entry, end);
#endif
			if (entry->wired_count == 0) {

				/*
				 *	Perform actions of vm_map_lookup that
				 *	need the write lock on the map: create
				 *	a shadow object for a copy-on-write
				 *	region, or an object for a zero-fill
				 *	region.
				 *
				 *	We don't have to do this for entries
				 *	that point to sharing maps, because
				 *	we won't hold the lock on the sharing
				 *	map.
				 */
				if (!entry->is_a_map) {
					if (entry->needs_copy &&
					    ((entry->protection &
					      VM_PROT_WRITE) != 0)) {

						vm_object_shadow(
						    &entry->object.vm_object,
						    &entry->offset,
						    (vm_size_t)(entry->end
							- entry->start));
						entry->needs_copy = FALSE;
					}
					else if (entry->object.vm_object ==
					    NULL) {
						entry->object.vm_object =
						    vm_object_allocate(
							(vm_size_t)(entry->end
							    - entry->start));
						entry->offset = (vm_offset_t)0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 *	Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
			     entry->next->start > entry->end)) {
				/*
				 *	Found one.  Object creation actions
				 *	do not need to be undone, but the
				 *	wired counts need to be restored.
				 */
				while (entry != &map->header &&
				    entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 *	Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel threads are well-behaved, and therefore will
		 * not do anything destructive to this region of the map
		 * while we have it unlocked.  We cannot trust user threads
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		}
		else {
			lock_set_recursive(&map->lock);
			lock_write_to_read(&map->lock);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to
			 * undo what has been done.  We decrement the wiring
			 * count for those pages which have not yet been
			 * wired (now) and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start,
				    entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		else {
			lock_clear_recursive(&map->lock);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return(rv);
		}
	}

	vm_map_unlock(map);

	return(KERN_SUCCESS);
}

/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
void vm_map_entry_unwire(map, entry)
	vm_map_t		map;
	register vm_map_entry_t	entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void vm_map_entry_delete(map, entry)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}
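
/*
 *	An illustrative sketch of wiring a user buffer for I/O with
 *	vm_map_pageable (FALSE wires, TRUE unwires), in the style of a
 *	vslock()/vsunlock() pair.  Not part of the original code;
 *	"uaddr" and "len" are hypothetical.
 */
#if 0
	if (vm_map_pageable(map, trunc_page(uaddr),
	    round_page(uaddr + len), FALSE) != KERN_SUCCESS)
		return (EFAULT);
	/* ... perform the I/O on the wired pages ... */
	(void) vm_map_pageable(map, trunc_page(uaddr),
	    round_page(uaddr + len), TRUE);
#endif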

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t	map;
	vm_offset_t		start;
	register vm_offset_t	end;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		first_entry;

	/*
	 *	Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 *	Fix the lookup hint now, rather than each
		 *	time through the loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 *	Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 *	Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t		next;
		register vm_offset_t	s, e;
		register vm_object_t	object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 *	Unwire before removing addresses from the pmap;
		 *	otherwise, unwiring will put the entries back in
		 *	the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 *	If this is a sharing map, we must remove
		 *	*all* references to this data, since we can't
		 *	find all of the physical maps which are sharing
		 *	it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, entry->offset,
					entry->offset + (e - s));
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
					 entry->offset,
					 entry->offset + (e - s));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 *	Delete the entry (which may delete the object)
		 *	only after removing all pmap entries pointing
		 *	to its pages.  (Otherwise, its page frames may
		 *	be reallocated, and any modify bits will be
		 *	set in the wrong object!)
144345748Smckusick */ 144445748Smckusick 144545748Smckusick vm_map_entry_delete(map, entry); 144645748Smckusick entry = next; 144745748Smckusick } 144845748Smckusick return(KERN_SUCCESS); 144945748Smckusick } 145045748Smckusick 145145748Smckusick /* 145245748Smckusick * vm_map_remove: 145345748Smckusick * 145445748Smckusick * Remove the given address range from the target map. 145545748Smckusick * This is the exported form of vm_map_delete. 145645748Smckusick */ 145753357Sbostic int 145845748Smckusick vm_map_remove(map, start, end) 145945748Smckusick register vm_map_t map; 146045748Smckusick register vm_offset_t start; 146145748Smckusick register vm_offset_t end; 146245748Smckusick { 146345748Smckusick register int result; 146445748Smckusick 146545748Smckusick vm_map_lock(map); 146645748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 146745748Smckusick result = vm_map_delete(map, start, end); 146845748Smckusick vm_map_unlock(map); 146945748Smckusick 147045748Smckusick return(result); 147145748Smckusick } 147245748Smckusick 147345748Smckusick /* 147445748Smckusick * vm_map_check_protection: 147545748Smckusick * 147645748Smckusick * Assert that the target map allows the specified 147745748Smckusick * privilege on the entire address region given. 147845748Smckusick * The entire region must be allocated. 147945748Smckusick */ 148045748Smckusick boolean_t vm_map_check_protection(map, start, end, protection) 148145748Smckusick register vm_map_t map; 148245748Smckusick register vm_offset_t start; 148345748Smckusick register vm_offset_t end; 148445748Smckusick register vm_prot_t protection; 148545748Smckusick { 148645748Smckusick register vm_map_entry_t entry; 148745748Smckusick vm_map_entry_t tmp_entry; 148845748Smckusick 148945748Smckusick if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 149045748Smckusick return(FALSE); 149145748Smckusick } 149245748Smckusick 149345748Smckusick entry = tmp_entry; 149445748Smckusick 149545748Smckusick while (start < end) { 149645748Smckusick if (entry == &map->header) { 149745748Smckusick return(FALSE); 149845748Smckusick } 149945748Smckusick 150045748Smckusick /* 150145748Smckusick * No holes allowed! 150245748Smckusick */ 150345748Smckusick 150445748Smckusick if (start < entry->start) { 150545748Smckusick return(FALSE); 150645748Smckusick } 150745748Smckusick 150845748Smckusick /* 150945748Smckusick * Check protection associated with entry. 151045748Smckusick */ 151145748Smckusick 151245748Smckusick if ((entry->protection & protection) != protection) { 151345748Smckusick return(FALSE); 151445748Smckusick } 151545748Smckusick 151645748Smckusick /* go to next entry */ 151745748Smckusick 151845748Smckusick start = entry->end; 151945748Smckusick entry = entry->next; 152045748Smckusick } 152145748Smckusick return(TRUE); 152245748Smckusick } 152345748Smckusick 152445748Smckusick /* 152545748Smckusick * vm_map_copy_entry: 152645748Smckusick * 152745748Smckusick * Copies the contents of the source entry to the destination 152845748Smckusick * entry. The entries *must* be aligned properly. 
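 *
 * ["Aligned" means the two entries describe ranges of equal
 * size with matching relative endpoints, i.e.
 *
 *	src_entry->end - src_entry->start ==
 *	    dst_entry->end - dst_entry->start
 *
 * vm_map_copy arranges this with its clipping passes before
 * calling here.]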
152945748Smckusick */ 153045748Smckusick void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry) 153145748Smckusick vm_map_t src_map, dst_map; 153245748Smckusick register vm_map_entry_t src_entry, dst_entry; 153345748Smckusick { 153445748Smckusick vm_object_t temp_object; 153545748Smckusick 153645748Smckusick if (src_entry->is_sub_map || dst_entry->is_sub_map) 153745748Smckusick return; 153845748Smckusick 153948383Skarels if (dst_entry->object.vm_object != NULL && 154050919Smckusick (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0) 154145748Smckusick printf("vm_map_copy_entry: copying over permanent data!\n"); 154245748Smckusick 154345748Smckusick /* 154445748Smckusick * If our destination map was wired down, 154545748Smckusick * unwire it now. 154645748Smckusick */ 154745748Smckusick 154845748Smckusick if (dst_entry->wired_count != 0) 154945748Smckusick vm_map_entry_unwire(dst_map, dst_entry); 155045748Smckusick 155145748Smckusick /* 155245748Smckusick * If we're dealing with a sharing map, we 155345748Smckusick * must remove the destination pages from 155445748Smckusick * all maps (since we cannot know which maps 155545748Smckusick * this sharing map belongs in). 155645748Smckusick */ 155745748Smckusick 155845748Smckusick if (dst_map->is_main_map) 155945748Smckusick pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end); 156045748Smckusick else 156145748Smckusick vm_object_pmap_remove(dst_entry->object.vm_object, 156245748Smckusick dst_entry->offset, 156345748Smckusick dst_entry->offset + 156445748Smckusick (dst_entry->end - dst_entry->start)); 156545748Smckusick 156645748Smckusick if (src_entry->wired_count == 0) { 156745748Smckusick 156845748Smckusick boolean_t src_needs_copy; 156945748Smckusick 157045748Smckusick /* 157145748Smckusick * If the source entry is marked needs_copy, 157245748Smckusick * it is already write-protected. 157345748Smckusick */ 157445748Smckusick if (!src_entry->needs_copy) { 157545748Smckusick 157645748Smckusick boolean_t su; 157745748Smckusick 157845748Smckusick /* 157945748Smckusick * If the source entry has only one mapping, 158045748Smckusick * we can just protect the virtual address 158145748Smckusick * range. 158245748Smckusick */ 158345748Smckusick if (!(su = src_map->is_main_map)) { 158445748Smckusick simple_lock(&src_map->ref_lock); 158545748Smckusick su = (src_map->ref_count == 1); 158645748Smckusick simple_unlock(&src_map->ref_lock); 158745748Smckusick } 158845748Smckusick 158945748Smckusick if (su) { 159045748Smckusick pmap_protect(src_map->pmap, 159145748Smckusick src_entry->start, 159245748Smckusick src_entry->end, 159345748Smckusick src_entry->protection & ~VM_PROT_WRITE); 159445748Smckusick } 159545748Smckusick else { 159645748Smckusick vm_object_pmap_copy(src_entry->object.vm_object, 159745748Smckusick src_entry->offset, 159845748Smckusick src_entry->offset + (src_entry->end 159945748Smckusick -src_entry->start)); 160045748Smckusick } 160145748Smckusick } 160245748Smckusick 160345748Smckusick /* 160445748Smckusick * Make a copy of the object. 
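 *
 * [vm_object_copy may defer the actual copy; when it does, it
 * reports through src_needs_copy that the source must also be
 * marked copy-on-write, which is handled just below.]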
160545748Smckusick */ 160645748Smckusick temp_object = dst_entry->object.vm_object; 160745748Smckusick vm_object_copy(src_entry->object.vm_object, 160845748Smckusick src_entry->offset, 160945748Smckusick (vm_size_t)(src_entry->end - 161045748Smckusick src_entry->start), 161145748Smckusick &dst_entry->object.vm_object, 161245748Smckusick &dst_entry->offset, 161345748Smckusick &src_needs_copy); 161445748Smckusick /* 161545748Smckusick * If we didn't get a copy-object now, mark the 161645748Smckusick * source map entry so that a shadow will be created 161745748Smckusick * to hold its changed pages. 161845748Smckusick */ 161945748Smckusick if (src_needs_copy) 162045748Smckusick src_entry->needs_copy = TRUE; 162145748Smckusick 162245748Smckusick /* 162345748Smckusick * The destination always needs to have a shadow 162445748Smckusick * created. 162545748Smckusick */ 162645748Smckusick dst_entry->needs_copy = TRUE; 162745748Smckusick 162845748Smckusick /* 162945748Smckusick * Mark the entries copy-on-write, so that write-enabling 163045748Smckusick * the entry won't make copy-on-write pages writable. 163145748Smckusick */ 163245748Smckusick src_entry->copy_on_write = TRUE; 163345748Smckusick dst_entry->copy_on_write = TRUE; 163445748Smckusick /* 163545748Smckusick * Get rid of the old object. 163645748Smckusick */ 163745748Smckusick vm_object_deallocate(temp_object); 163845748Smckusick 163945748Smckusick pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 164045748Smckusick dst_entry->end - dst_entry->start, src_entry->start); 164145748Smckusick } 164245748Smckusick else { 164345748Smckusick /* 164445748Smckusick * Of course, wired down pages can't be set copy-on-write. 164545748Smckusick * Cause wired pages to be copied into the new 164645748Smckusick * map by simulating faults (the new pages are 164745748Smckusick * pageable) 164845748Smckusick */ 164945748Smckusick vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 165045748Smckusick } 165145748Smckusick } 165245748Smckusick 165345748Smckusick /* 165445748Smckusick * vm_map_copy: 165545748Smckusick * 165645748Smckusick * Perform a virtual memory copy from the source 165745748Smckusick * address map/range to the destination map/range. 165845748Smckusick * 165945748Smckusick * If src_destroy or dst_alloc is requested, 166045748Smckusick * the source and destination regions should be 166145748Smckusick * disjoint, not only in the top-level map, but 166245748Smckusick * in the sharing maps as well. [The best way 166345748Smckusick * to guarantee this is to use a new intermediate 166445748Smckusick * map to make copies. This also reduces map 166545748Smckusick * fragmentation.] 
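 *
 * A minimal sketch of a copy between two existing, already
 * allocated regions (the addresses dst_va/src_va and the rv
 * local are hypothetical; no allocation, no source destruction):
 *
 *	int rv;
 *
 *	rv = vm_map_copy(dst_map, src_map, dst_va, len, src_va,
 *	    FALSE, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		printf("vm_map_copy failed: %d\n", rv);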
166645748Smckusick */ 166753357Sbostic int 166845748Smckusick vm_map_copy(dst_map, src_map, 166945748Smckusick dst_addr, len, src_addr, 167045748Smckusick dst_alloc, src_destroy) 167145748Smckusick vm_map_t dst_map; 167245748Smckusick vm_map_t src_map; 167345748Smckusick vm_offset_t dst_addr; 167445748Smckusick vm_size_t len; 167545748Smckusick vm_offset_t src_addr; 167645748Smckusick boolean_t dst_alloc; 167745748Smckusick boolean_t src_destroy; 167845748Smckusick { 167945748Smckusick register 168045748Smckusick vm_map_entry_t src_entry; 168145748Smckusick register 168245748Smckusick vm_map_entry_t dst_entry; 168345748Smckusick vm_map_entry_t tmp_entry; 168445748Smckusick vm_offset_t src_start; 168545748Smckusick vm_offset_t src_end; 168645748Smckusick vm_offset_t dst_start; 168745748Smckusick vm_offset_t dst_end; 168845748Smckusick vm_offset_t src_clip; 168945748Smckusick vm_offset_t dst_clip; 169045748Smckusick int result; 169145748Smckusick boolean_t old_src_destroy; 169245748Smckusick 169345748Smckusick /* 169445748Smckusick * XXX While we figure out why src_destroy screws up, 169545748Smckusick * we'll do it by explicitly vm_map_delete'ing at the end. 169645748Smckusick */ 169745748Smckusick 169845748Smckusick old_src_destroy = src_destroy; 169945748Smckusick src_destroy = FALSE; 170045748Smckusick 170145748Smckusick /* 170245748Smckusick * Compute start and end of region in both maps 170345748Smckusick */ 170445748Smckusick 170545748Smckusick src_start = src_addr; 170645748Smckusick src_end = src_start + len; 170745748Smckusick dst_start = dst_addr; 170845748Smckusick dst_end = dst_start + len; 170945748Smckusick 171045748Smckusick /* 171145748Smckusick * Check that the region can exist in both source 171245748Smckusick * and destination. 171345748Smckusick */ 171445748Smckusick 171545748Smckusick if ((dst_end < dst_start) || (src_end < src_start)) 171645748Smckusick return(KERN_NO_SPACE); 171745748Smckusick 171845748Smckusick /* 171945748Smckusick * Lock the maps in question -- we avoid deadlock 172045748Smckusick * by ordering lock acquisition by map value 172145748Smckusick */ 172245748Smckusick 172345748Smckusick if (src_map == dst_map) { 172445748Smckusick vm_map_lock(src_map); 172545748Smckusick } 172645748Smckusick else if ((int) src_map < (int) dst_map) { 172745748Smckusick vm_map_lock(src_map); 172845748Smckusick vm_map_lock(dst_map); 172945748Smckusick } else { 173045748Smckusick vm_map_lock(dst_map); 173145748Smckusick vm_map_lock(src_map); 173245748Smckusick } 173345748Smckusick 173445748Smckusick result = KERN_SUCCESS; 173545748Smckusick 173645748Smckusick /* 173745748Smckusick * Check protections... source must be completely readable and 173845748Smckusick * destination must be completely writable. [Note that if we're 173945748Smckusick * allocating the destination region, we don't have to worry 174045748Smckusick * about protection, but instead about whether the region 174145748Smckusick * exists.] 
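 *
 * [Note that these checks apply only when both maps are main
 * maps; the recursive copies between sharing maps below are
 * performed without rechecking protection.]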
174245748Smckusick */ 174345748Smckusick 174445748Smckusick if (src_map->is_main_map && dst_map->is_main_map) { 174545748Smckusick if (!vm_map_check_protection(src_map, src_start, src_end, 174645748Smckusick VM_PROT_READ)) { 174745748Smckusick result = KERN_PROTECTION_FAILURE; 174845748Smckusick goto Return; 174945748Smckusick } 175045748Smckusick 175145748Smckusick if (dst_alloc) { 175245748Smckusick /* XXX Consider making this a vm_map_find instead */ 175348383Skarels if ((result = vm_map_insert(dst_map, NULL, 175445748Smckusick (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS) 175545748Smckusick goto Return; 175645748Smckusick } 175745748Smckusick else if (!vm_map_check_protection(dst_map, dst_start, dst_end, 175845748Smckusick VM_PROT_WRITE)) { 175945748Smckusick result = KERN_PROTECTION_FAILURE; 176045748Smckusick goto Return; 176145748Smckusick } 176245748Smckusick } 176345748Smckusick 176445748Smckusick /* 176545748Smckusick * Find the start entries and clip. 176645748Smckusick * 176745748Smckusick * Note that checking protection asserts that the 176845748Smckusick * lookup cannot fail. 176945748Smckusick * 177045748Smckusick * Also note that we wait to do the second lookup 177145748Smckusick * until we have done the first clip, as the clip 177245748Smckusick * may affect which entry we get! 177345748Smckusick */ 177445748Smckusick 177545748Smckusick (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); 177645748Smckusick src_entry = tmp_entry; 177745748Smckusick vm_map_clip_start(src_map, src_entry, src_start); 177845748Smckusick 177945748Smckusick (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry); 178045748Smckusick dst_entry = tmp_entry; 178145748Smckusick vm_map_clip_start(dst_map, dst_entry, dst_start); 178245748Smckusick 178345748Smckusick /* 178445748Smckusick * If both source and destination entries are the same, 178545748Smckusick * retry the first lookup, as it may have changed. 178645748Smckusick */ 178745748Smckusick 178845748Smckusick if (src_entry == dst_entry) { 178945748Smckusick (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); 179045748Smckusick src_entry = tmp_entry; 179145748Smckusick } 179245748Smckusick 179345748Smckusick /* 179445748Smckusick * If source and destination entries are still the same, 179545748Smckusick * a null copy is being performed. 179645748Smckusick */ 179745748Smckusick 179845748Smckusick if (src_entry == dst_entry) 179945748Smckusick goto Return; 180045748Smckusick 180145748Smckusick /* 180245748Smckusick * Go through entries until we get to the end of the 180345748Smckusick * region. 180445748Smckusick */ 180545748Smckusick 180645748Smckusick while (src_start < src_end) { 180745748Smckusick /* 180845748Smckusick * Clip the entries to the endpoint of the entire region. 180945748Smckusick */ 181045748Smckusick 181145748Smckusick vm_map_clip_end(src_map, src_entry, src_end); 181245748Smckusick vm_map_clip_end(dst_map, dst_entry, dst_end); 181345748Smckusick 181445748Smckusick /* 181545748Smckusick * Clip each entry to the endpoint of the other entry. 
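 *
 * [For example, if the source entry covers three pages and the
 * destination entry one, src_clip falls one page past
 * src_entry->start, the source entry is split there, and this
 * iteration copies exactly one page's worth.]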
181645748Smckusick */ 181745748Smckusick 181845748Smckusick src_clip = src_entry->start + (dst_entry->end - dst_entry->start); 181945748Smckusick vm_map_clip_end(src_map, src_entry, src_clip); 182045748Smckusick 182145748Smckusick dst_clip = dst_entry->start + (src_entry->end - src_entry->start); 182245748Smckusick vm_map_clip_end(dst_map, dst_entry, dst_clip); 182345748Smckusick 182445748Smckusick /* 182545748Smckusick * Both entries now match in size and relative endpoints. 182645748Smckusick * 182745748Smckusick * If both entries refer to a VM object, we can 182845748Smckusick * deal with them now. 182945748Smckusick */ 183045748Smckusick 183145748Smckusick if (!src_entry->is_a_map && !dst_entry->is_a_map) { 183245748Smckusick vm_map_copy_entry(src_map, dst_map, src_entry, 183345748Smckusick dst_entry); 183445748Smckusick } 183545748Smckusick else { 183645748Smckusick register vm_map_t new_dst_map; 183745748Smckusick vm_offset_t new_dst_start; 183845748Smckusick vm_size_t new_size; 183945748Smckusick vm_map_t new_src_map; 184045748Smckusick vm_offset_t new_src_start; 184145748Smckusick 184245748Smckusick /* 184345748Smckusick * We have to follow at least one sharing map. 184445748Smckusick */ 184545748Smckusick 184645748Smckusick new_size = (dst_entry->end - dst_entry->start); 184745748Smckusick 184845748Smckusick if (src_entry->is_a_map) { 184945748Smckusick new_src_map = src_entry->object.share_map; 185045748Smckusick new_src_start = src_entry->offset; 185145748Smckusick } 185245748Smckusick else { 185345748Smckusick new_src_map = src_map; 185445748Smckusick new_src_start = src_entry->start; 185545748Smckusick lock_set_recursive(&src_map->lock); 185645748Smckusick } 185745748Smckusick 185845748Smckusick if (dst_entry->is_a_map) { 185945748Smckusick vm_offset_t new_dst_end; 186045748Smckusick 186145748Smckusick new_dst_map = dst_entry->object.share_map; 186245748Smckusick new_dst_start = dst_entry->offset; 186345748Smckusick 186445748Smckusick /* 186545748Smckusick * Since the destination sharing entries 186645748Smckusick * will be merely deallocated, we can 186745748Smckusick * do that now, and replace the region 186845748Smckusick * with a null object. [This prevents 186945748Smckusick * splitting the source map to match 187045748Smckusick * the form of the destination map.] 187145748Smckusick * Note that we can only do so if the 187245748Smckusick * source and destination do not overlap. 187345748Smckusick */ 187445748Smckusick 187545748Smckusick new_dst_end = new_dst_start + new_size; 187645748Smckusick 187745748Smckusick if (new_dst_map != new_src_map) { 187845748Smckusick vm_map_lock(new_dst_map); 187945748Smckusick (void) vm_map_delete(new_dst_map, 188045748Smckusick new_dst_start, 188145748Smckusick new_dst_end); 188245748Smckusick (void) vm_map_insert(new_dst_map, 188348383Skarels NULL, 188445748Smckusick (vm_offset_t) 0, 188545748Smckusick new_dst_start, 188645748Smckusick new_dst_end); 188745748Smckusick vm_map_unlock(new_dst_map); 188845748Smckusick } 188945748Smckusick } 189045748Smckusick else { 189145748Smckusick new_dst_map = dst_map; 189245748Smckusick new_dst_start = dst_entry->start; 189345748Smckusick lock_set_recursive(&dst_map->lock); 189445748Smckusick } 189545748Smckusick 189645748Smckusick /* 189745748Smckusick * Recursively copy the sharing map. 
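 *
 * [dst_alloc and src_destroy are both passed as FALSE here:
 * the destination range was dealt with above, and any source
 * destruction is applied once, at the top level.]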
189845748Smckusick */ 189945748Smckusick 190045748Smckusick (void) vm_map_copy(new_dst_map, new_src_map, 190145748Smckusick new_dst_start, new_size, new_src_start, 190245748Smckusick FALSE, FALSE); 190345748Smckusick 190445748Smckusick if (dst_map == new_dst_map) 190545748Smckusick lock_clear_recursive(&dst_map->lock); 190645748Smckusick if (src_map == new_src_map) 190745748Smckusick lock_clear_recursive(&src_map->lock); 190845748Smckusick } 190945748Smckusick 191045748Smckusick /* 191145748Smckusick * Update variables for next pass through the loop. 191245748Smckusick */ 191345748Smckusick 191445748Smckusick src_start = src_entry->end; 191545748Smckusick src_entry = src_entry->next; 191645748Smckusick dst_start = dst_entry->end; 191745748Smckusick dst_entry = dst_entry->next; 191845748Smckusick 191945748Smckusick /* 192045748Smckusick * If the source is to be destroyed, here is the 192145748Smckusick * place to do it. 192245748Smckusick */ 192345748Smckusick 192445748Smckusick if (src_destroy && src_map->is_main_map && 192545748Smckusick dst_map->is_main_map) 192645748Smckusick vm_map_entry_delete(src_map, src_entry->prev); 192745748Smckusick } 192845748Smckusick 192945748Smckusick /* 193045748Smckusick * Update the physical maps as appropriate 193145748Smckusick */ 193245748Smckusick 193345748Smckusick if (src_map->is_main_map && dst_map->is_main_map) { 193445748Smckusick if (src_destroy) 193545748Smckusick pmap_remove(src_map->pmap, src_addr, src_addr + len); 193645748Smckusick } 193745748Smckusick 193845748Smckusick /* 193945748Smckusick * Unlock the maps 194045748Smckusick */ 194145748Smckusick 194245748Smckusick Return: ; 194345748Smckusick 194445748Smckusick if (old_src_destroy) 194545748Smckusick vm_map_delete(src_map, src_addr, src_addr + len); 194645748Smckusick 194745748Smckusick vm_map_unlock(src_map); 194845748Smckusick if (src_map != dst_map) 194945748Smckusick vm_map_unlock(dst_map); 195045748Smckusick 195145748Smckusick return(result); 195245748Smckusick } 195345748Smckusick 195445748Smckusick /* 195548383Skarels * vmspace_fork: 195648383Skarels * Create a new process vmspace structure and vm_map 195748383Skarels * based on those of an existing process. The new map 195848383Skarels * is based on the old map, according to the inheritance 195948383Skarels * values on the regions in that map. 196045748Smckusick * 196148383Skarels * The source map must not be locked. 
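 *
 * A sketch of the expected use, from the fork path (the caller
 * shape shown is assumed, not taken from this file):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);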
196245748Smckusick */ 196348383Skarels struct vmspace * 196448383Skarels vmspace_fork(vm1) 196548383Skarels register struct vmspace *vm1; 196645748Smckusick { 196748383Skarels register struct vmspace *vm2; 196848383Skarels vm_map_t old_map = &vm1->vm_map; 196945748Smckusick vm_map_t new_map; 197045748Smckusick vm_map_entry_t old_entry; 197145748Smckusick vm_map_entry_t new_entry; 197245748Smckusick pmap_t new_pmap; 197345748Smckusick 197445748Smckusick vm_map_lock(old_map); 197545748Smckusick 197648383Skarels vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, 197748383Skarels old_map->entries_pageable); 197848383Skarels bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 197948383Skarels (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); 198048383Skarels new_pmap = &vm2->vm_pmap; /* XXX */ 198148383Skarels new_map = &vm2->vm_map; /* XXX */ 198245748Smckusick 198345748Smckusick old_entry = old_map->header.next; 198445748Smckusick 198545748Smckusick while (old_entry != &old_map->header) { 198645748Smckusick if (old_entry->is_sub_map) 198745748Smckusick panic("vmspace_fork: encountered a submap"); 198845748Smckusick 198945748Smckusick switch (old_entry->inheritance) { 199045748Smckusick case VM_INHERIT_NONE: 199145748Smckusick break; 199245748Smckusick 199345748Smckusick case VM_INHERIT_SHARE: 199445748Smckusick /* 199545748Smckusick * If we don't already have a sharing map: 199645748Smckusick */ 199745748Smckusick 199845748Smckusick if (!old_entry->is_a_map) { 199945748Smckusick vm_map_t new_share_map; 200045748Smckusick vm_map_entry_t new_share_entry; 200145748Smckusick 200245748Smckusick /* 200345748Smckusick * Create a new sharing map 200445748Smckusick */ 200545748Smckusick 200648383Skarels new_share_map = vm_map_create(NULL, 200745748Smckusick old_entry->start, 200845748Smckusick old_entry->end, 200945748Smckusick TRUE); 201045748Smckusick new_share_map->is_main_map = FALSE; 201145748Smckusick 201245748Smckusick /* 201345748Smckusick * Create the only sharing entry from the 201445748Smckusick * old task map entry. 201545748Smckusick */ 201645748Smckusick 201745748Smckusick new_share_entry = 201845748Smckusick vm_map_entry_create(new_share_map); 201945748Smckusick *new_share_entry = *old_entry; 202061005Shibler new_share_entry->wired_count = 0; 202145748Smckusick 202245748Smckusick /* 202345748Smckusick * Insert the entry into the new sharing 202445748Smckusick * map 202545748Smckusick */ 202645748Smckusick 202745748Smckusick vm_map_entry_link(new_share_map, 202845748Smckusick new_share_map->header.prev, 202945748Smckusick new_share_entry); 203045748Smckusick 203145748Smckusick /* 203245748Smckusick * Fix up the task map entry to refer 203345748Smckusick * to the sharing map now. 203445748Smckusick */ 203545748Smckusick 203645748Smckusick old_entry->is_a_map = TRUE; 203745748Smckusick old_entry->object.share_map = new_share_map; 203845748Smckusick old_entry->offset = old_entry->start; 203945748Smckusick } 204045748Smckusick 204145748Smckusick /* 204245748Smckusick * Clone the entry, referencing the sharing map. 204345748Smckusick */ 204445748Smckusick 204545748Smckusick new_entry = vm_map_entry_create(new_map); 204645748Smckusick *new_entry = *old_entry; 204761005Shibler new_entry->wired_count = 0; 204845748Smckusick vm_map_reference(new_entry->object.share_map); 204945748Smckusick 205045748Smckusick /* 205145748Smckusick * Insert the entry into the new map -- we 205245748Smckusick * know we're inserting at the end of the new 205345748Smckusick * map.
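 *
 * [Entries are visited in address order, so linking after
 * header.prev -- the current tail -- keeps the new map sorted
 * without a lookup.]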
205445748Smckusick */ 205545748Smckusick 205645748Smckusick vm_map_entry_link(new_map, new_map->header.prev, 205745748Smckusick new_entry); 205845748Smckusick 205945748Smckusick /* 206045748Smckusick * Update the physical map 206145748Smckusick */ 206245748Smckusick 206345748Smckusick pmap_copy(new_map->pmap, old_map->pmap, 206445748Smckusick new_entry->start, 206545748Smckusick (old_entry->end - old_entry->start), 206645748Smckusick old_entry->start); 206745748Smckusick break; 206845748Smckusick 206945748Smckusick case VM_INHERIT_COPY: 207045748Smckusick /* 207145748Smckusick * Clone the entry and link into the map. 207245748Smckusick */ 207345748Smckusick 207445748Smckusick new_entry = vm_map_entry_create(new_map); 207545748Smckusick *new_entry = *old_entry; 207645748Smckusick new_entry->wired_count = 0; 207748383Skarels new_entry->object.vm_object = NULL; 207845748Smckusick new_entry->is_a_map = FALSE; 207945748Smckusick vm_map_entry_link(new_map, new_map->header.prev, 208045748Smckusick new_entry); 208145748Smckusick if (old_entry->is_a_map) { 208245748Smckusick int check; 208345748Smckusick 208445748Smckusick check = vm_map_copy(new_map, 208545748Smckusick old_entry->object.share_map, 208645748Smckusick new_entry->start, 208745748Smckusick (vm_size_t)(new_entry->end - 208845748Smckusick new_entry->start), 208945748Smckusick old_entry->offset, 209045748Smckusick FALSE, FALSE); 209145748Smckusick if (check != KERN_SUCCESS) 209245748Smckusick printf("vmspace_fork: copy in share_map region failed\n"); 209345748Smckusick } 209445748Smckusick else { 209545748Smckusick vm_map_copy_entry(old_map, new_map, old_entry, 209645748Smckusick new_entry); 209745748Smckusick } 209845748Smckusick break; 209945748Smckusick } 210045748Smckusick old_entry = old_entry->next; 210145748Smckusick } 210245748Smckusick 210345748Smckusick new_map->size = old_map->size; 210445748Smckusick vm_map_unlock(old_map); 210545748Smckusick 210648383Skarels return(vm2); 210745748Smckusick } 210845748Smckusick 210945748Smckusick /* 211045748Smckusick * vm_map_lookup: 211145748Smckusick * 211245748Smckusick * Finds the VM object, offset, and 211345748Smckusick * protection for a given virtual address in the 211445748Smckusick * specified map, assuming a page fault of the 211545748Smckusick * type specified. 211645748Smckusick * 211745748Smckusick * Leaves the map in question locked for read; return 211845748Smckusick * values are guaranteed until a vm_map_lookup_done 211945748Smckusick * call is performed. Note that the map argument 212045748Smckusick * is in/out; the returned map must be used in 212145748Smckusick * the call to vm_map_lookup_done. 212245748Smckusick * 212345748Smckusick * A handle (out_entry) is returned for use in 212445748Smckusick * vm_map_lookup_done, to make that fast. 212545748Smckusick * 212645748Smckusick * If a lookup is requested with "write protection" 212745748Smckusick * specified, the map may be changed to perform virtual 212845748Smckusick * copying operations, although the data referenced will 212945748Smckusick * remain the same.
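 *
 * A minimal sketch of the call pairing, in the shape of a
 * fault handler (locals assumed; error paths elided):
 *
 *	result = vm_map_lookup(&map, vaddr, fault_type,
 *	    &entry, &object, &offset, &prot, &wired, &su);
 *	if (result != KERN_SUCCESS)
 *		return(result);
 *	... resolve the fault against object/offset ...
 *	vm_map_lookup_done(map, entry);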
213045748Smckusick */ 213153357Sbostic int 213245748Smckusick vm_map_lookup(var_map, vaddr, fault_type, out_entry, 213345748Smckusick object, offset, out_prot, wired, single_use) 213445748Smckusick vm_map_t *var_map; /* IN/OUT */ 213545748Smckusick register vm_offset_t vaddr; 213645748Smckusick register vm_prot_t fault_type; 213745748Smckusick 213845748Smckusick vm_map_entry_t *out_entry; /* OUT */ 213945748Smckusick vm_object_t *object; /* OUT */ 214045748Smckusick vm_offset_t *offset; /* OUT */ 214145748Smckusick vm_prot_t *out_prot; /* OUT */ 214245748Smckusick boolean_t *wired; /* OUT */ 214345748Smckusick boolean_t *single_use; /* OUT */ 214445748Smckusick { 214545748Smckusick vm_map_t share_map; 214645748Smckusick vm_offset_t share_offset; 214745748Smckusick register vm_map_entry_t entry; 214845748Smckusick register vm_map_t map = *var_map; 214945748Smckusick register vm_prot_t prot; 215045748Smckusick register boolean_t su; 215145748Smckusick 215245748Smckusick RetryLookup: ; 215345748Smckusick 215445748Smckusick /* 215545748Smckusick * Lookup the faulting address. 215645748Smckusick */ 215745748Smckusick 215845748Smckusick vm_map_lock_read(map); 215945748Smckusick 216045748Smckusick #define RETURN(why) \ 216145748Smckusick { \ 216245748Smckusick vm_map_unlock_read(map); \ 216345748Smckusick return(why); \ 216445748Smckusick } 216545748Smckusick 216645748Smckusick /* 216745748Smckusick * If the map has an interesting hint, try it before calling 216845748Smckusick * the full-blown lookup routine. 216945748Smckusick */ 217045748Smckusick 217145748Smckusick simple_lock(&map->hint_lock); 217245748Smckusick entry = map->hint; 217345748Smckusick simple_unlock(&map->hint_lock); 217445748Smckusick 217545748Smckusick *out_entry = entry; 217645748Smckusick 217745748Smckusick if ((entry == &map->header) || 217845748Smckusick (vaddr < entry->start) || (vaddr >= entry->end)) { 217945748Smckusick vm_map_entry_t tmp_entry; 218045748Smckusick 218145748Smckusick /* 218245748Smckusick * Entry was either not a valid hint, or the vaddr 218345748Smckusick * was not contained in the entry, so do a full lookup. 218445748Smckusick */ 218545748Smckusick if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 218645748Smckusick RETURN(KERN_INVALID_ADDRESS); 218745748Smckusick 218845748Smckusick entry = tmp_entry; 218945748Smckusick *out_entry = entry; 219045748Smckusick } 219145748Smckusick 219245748Smckusick /* 219345748Smckusick * Handle submaps. 219445748Smckusick */ 219545748Smckusick 219645748Smckusick if (entry->is_sub_map) { 219745748Smckusick vm_map_t old_map = map; 219845748Smckusick 219945748Smckusick *var_map = map = entry->object.sub_map; 220045748Smckusick vm_map_unlock_read(old_map); 220145748Smckusick goto RetryLookup; 220245748Smckusick } 220345748Smckusick 220445748Smckusick /* 220545748Smckusick * Check whether this task is allowed to have 220645748Smckusick * this page. 220745748Smckusick */ 220845748Smckusick 220945748Smckusick prot = entry->protection; 221045748Smckusick if ((fault_type & (prot)) != fault_type) 221145748Smckusick RETURN(KERN_PROTECTION_FAILURE); 221245748Smckusick 221345748Smckusick /* 221445748Smckusick * If this page is not pageable, we have to get 221545748Smckusick * it for all possible accesses.
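 *
 * [A wired entry must remain resident and mapped for every
 * access its protection allows, so the fault type is widened
 * to the full entry protection rather than just the access
 * that faulted.]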
221645748Smckusick */ 221745748Smckusick 221845748Smckusick if (*wired = (entry->wired_count != 0)) 221945748Smckusick prot = fault_type = entry->protection; 222045748Smckusick 222145748Smckusick /* 222245748Smckusick * If we don't already have a VM object, track 222345748Smckusick * it down. 222445748Smckusick */ 222545748Smckusick 222645748Smckusick if (su = !entry->is_a_map) { 222745748Smckusick share_map = map; 222845748Smckusick share_offset = vaddr; 222945748Smckusick } 223045748Smckusick else { 223145748Smckusick vm_map_entry_t share_entry; 223245748Smckusick 223345748Smckusick /* 223445748Smckusick * Compute the sharing map, and offset into it. 223545748Smckusick */ 223645748Smckusick 223745748Smckusick share_map = entry->object.share_map; 223845748Smckusick share_offset = (vaddr - entry->start) + entry->offset; 223945748Smckusick 224045748Smckusick /* 224145748Smckusick * Look for the backing store object and offset 224245748Smckusick */ 224345748Smckusick 224445748Smckusick vm_map_lock_read(share_map); 224545748Smckusick 224645748Smckusick if (!vm_map_lookup_entry(share_map, share_offset, 224745748Smckusick &share_entry)) { 224845748Smckusick vm_map_unlock_read(share_map); 224945748Smckusick RETURN(KERN_INVALID_ADDRESS); 225045748Smckusick } 225145748Smckusick entry = share_entry; 225245748Smckusick } 225345748Smckusick 225445748Smckusick /* 225545748Smckusick * If the entry was copy-on-write, we either resolve the copy now (for a write) or demote the allowed protection (for a read). 225645748Smckusick */ 225745748Smckusick 225845748Smckusick if (entry->needs_copy) { 225945748Smckusick /* 226045748Smckusick * If we want to write the page, we may as well 226145748Smckusick * handle that now since we've got the sharing 226245748Smckusick * map locked. 226345748Smckusick * 226445748Smckusick * If we don't need to write the page, we just 226545748Smckusick * demote the permissions allowed. 226645748Smckusick */ 226745748Smckusick 226845748Smckusick if (fault_type & VM_PROT_WRITE) { 226945748Smckusick /* 227045748Smckusick * Make a new object, and place it in the 227145748Smckusick * object chain. Note that no new references 227245748Smckusick * have appeared -- one just moved from the 227345748Smckusick * share map to the new object. 227445748Smckusick */ 227545748Smckusick 227645748Smckusick if (lock_read_to_write(&share_map->lock)) { 227745748Smckusick if (share_map != map) 227845748Smckusick vm_map_unlock_read(map); 227945748Smckusick goto RetryLookup; 228045748Smckusick } 228145748Smckusick 228245748Smckusick vm_object_shadow( 228345748Smckusick &entry->object.vm_object, 228445748Smckusick &entry->offset, 228545748Smckusick (vm_size_t) (entry->end - entry->start)); 228645748Smckusick 228745748Smckusick entry->needs_copy = FALSE; 228845748Smckusick 228945748Smckusick lock_write_to_read(&share_map->lock); 229045748Smckusick } 229145748Smckusick else { 229245748Smckusick /* 229345748Smckusick * We're attempting to read a copy-on-write 229445748Smckusick * page -- don't allow writes. 229545748Smckusick */ 229645748Smckusick 229745748Smckusick prot &= (~VM_PROT_WRITE); 229845748Smckusick } 229945748Smckusick } 230045748Smckusick 230145748Smckusick /* 230245748Smckusick * Create an object if necessary.
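 *
 * [Anonymous regions can be created with no object at all;
 * the backing object is only allocated here, on the first
 * fault that reaches this point.]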
230345748Smckusick */ 230448383Skarels if (entry->object.vm_object == NULL) { 230545748Smckusick 230645748Smckusick if (lock_read_to_write(&share_map->lock)) { 230745748Smckusick if (share_map != map) 230845748Smckusick vm_map_unlock_read(map); 230945748Smckusick goto RetryLookup; 231045748Smckusick } 231145748Smckusick 231245748Smckusick entry->object.vm_object = vm_object_allocate( 231345748Smckusick (vm_size_t)(entry->end - entry->start)); 231445748Smckusick entry->offset = 0; 231545748Smckusick lock_write_to_read(&share_map->lock); 231645748Smckusick } 231745748Smckusick 231845748Smckusick /* 231945748Smckusick * Return the object/offset from this entry. If the entry 232045748Smckusick * was copy-on-write or empty, it has been fixed up. 232145748Smckusick */ 232245748Smckusick 232345748Smckusick *offset = (share_offset - entry->start) + entry->offset; 232445748Smckusick *object = entry->object.vm_object; 232545748Smckusick 232645748Smckusick /* 232745748Smckusick * Return whether this is the only map sharing this data. 232845748Smckusick */ 232945748Smckusick 233045748Smckusick if (!su) { 233145748Smckusick simple_lock(&share_map->ref_lock); 233245748Smckusick su = (share_map->ref_count == 1); 233345748Smckusick simple_unlock(&share_map->ref_lock); 233445748Smckusick } 233545748Smckusick 233645748Smckusick *out_prot = prot; 233745748Smckusick *single_use = su; 233845748Smckusick 233945748Smckusick return(KERN_SUCCESS); 234045748Smckusick 234145748Smckusick #undef RETURN 234245748Smckusick } 234345748Smckusick 234445748Smckusick /* 234545748Smckusick * vm_map_lookup_done: 234645748Smckusick * 234745748Smckusick * Releases locks acquired by a vm_map_lookup 234845748Smckusick * (according to the handle returned by that lookup). 234945748Smckusick */ 235045748Smckusick 235145748Smckusick void vm_map_lookup_done(map, entry) 235245748Smckusick register vm_map_t map; 235345748Smckusick vm_map_entry_t entry; 235445748Smckusick { 235545748Smckusick /* 235645748Smckusick * If this entry references a map, unlock it first. 235745748Smckusick */ 235845748Smckusick 235945748Smckusick if (entry->is_a_map) 236045748Smckusick vm_map_unlock_read(entry->object.share_map); 236145748Smckusick 236245748Smckusick /* 236345748Smckusick * Unlock the main-level map 236445748Smckusick */ 236545748Smckusick 236645748Smckusick vm_map_unlock_read(map); 236745748Smckusick } 236845748Smckusick 236945748Smckusick /* 237045748Smckusick * Routine: vm_map_simplify 237145748Smckusick * Purpose: 237245748Smckusick * Attempt to simplify the map representation in 237345748Smckusick * the vicinity of the given starting address. 237445748Smckusick * Note: 237545748Smckusick * This routine is intended primarily to keep the 237645748Smckusick * kernel maps more compact -- they generally don't 237745748Smckusick * benefit from the "expand a map entry" technology 237845748Smckusick * at allocation time because the adjacent entry 237945748Smckusick * is often wired down. 
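 *
 * [The typical candidate: two abutting entries mapping the
 * same object at contiguous offsets, with identical
 * protection and wiring, left over from back-to-back kernel
 * allocations; they collapse into a single entry below.]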
238045748Smckusick */ 238145748Smckusick void vm_map_simplify(map, start) 238245748Smckusick vm_map_t map; 238345748Smckusick vm_offset_t start; 238445748Smckusick { 238545748Smckusick vm_map_entry_t this_entry; 238645748Smckusick vm_map_entry_t prev_entry; 238745748Smckusick 238845748Smckusick vm_map_lock(map); 238945748Smckusick if ( 239045748Smckusick (vm_map_lookup_entry(map, start, &this_entry)) && 239145748Smckusick ((prev_entry = this_entry->prev) != &map->header) && 239245748Smckusick 239345748Smckusick (prev_entry->end == start) && 239445748Smckusick (map->is_main_map) && 239545748Smckusick 239645748Smckusick (prev_entry->is_a_map == FALSE) && 239745748Smckusick (prev_entry->is_sub_map == FALSE) && 239845748Smckusick 239945748Smckusick (this_entry->is_a_map == FALSE) && 240045748Smckusick (this_entry->is_sub_map == FALSE) && 240145748Smckusick 240245748Smckusick (prev_entry->inheritance == this_entry->inheritance) && 240345748Smckusick (prev_entry->protection == this_entry->protection) && 240445748Smckusick (prev_entry->max_protection == this_entry->max_protection) && 240545748Smckusick (prev_entry->wired_count == this_entry->wired_count) && 240645748Smckusick 240745748Smckusick (prev_entry->copy_on_write == this_entry->copy_on_write) && 240845748Smckusick (prev_entry->needs_copy == this_entry->needs_copy) && 240945748Smckusick 241045748Smckusick (prev_entry->object.vm_object == this_entry->object.vm_object) && 241145748Smckusick ((prev_entry->offset + (prev_entry->end - prev_entry->start)) 241245748Smckusick == this_entry->offset) 241345748Smckusick ) { 241445748Smckusick if (map->first_free == this_entry) 241545748Smckusick map->first_free = prev_entry; 241645748Smckusick 241745748Smckusick SAVE_HINT(map, prev_entry); 241845748Smckusick vm_map_entry_unlink(map, this_entry); 241945748Smckusick prev_entry->end = this_entry->end; 242045748Smckusick vm_object_deallocate(this_entry->object.vm_object); 242145748Smckusick vm_map_entry_dispose(map, this_entry); 242245748Smckusick } 242345748Smckusick vm_map_unlock(map); 242445748Smckusick } 242545748Smckusick 242645748Smckusick /* 242745748Smckusick * vm_map_print: [ debug ] 242845748Smckusick */ 242945748Smckusick void vm_map_print(map, full) 243045748Smckusick register vm_map_t map; 243145748Smckusick boolean_t full; 243245748Smckusick { 243345748Smckusick register vm_map_entry_t entry; 243445748Smckusick extern int indent; 243545748Smckusick 243645748Smckusick iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n", 243745748Smckusick (map->is_main_map ? 
"Task" : "Share"), 243845748Smckusick (int) map, (int) (map->pmap), map->ref_count, map->nentries, 243945748Smckusick map->timestamp); 244045748Smckusick 244145748Smckusick if (!full && indent) 244245748Smckusick return; 244345748Smckusick 244445748Smckusick indent += 2; 244545748Smckusick for (entry = map->header.next; entry != &map->header; 244645748Smckusick entry = entry->next) { 244745748Smckusick iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", 244845748Smckusick (int) entry, (int) entry->start, (int) entry->end); 244945748Smckusick if (map->is_main_map) { 245045748Smckusick static char *inheritance_name[4] = 245145748Smckusick { "share", "copy", "none", "donate_copy"}; 245245748Smckusick printf("prot=%x/%x/%s, ", 245345748Smckusick entry->protection, 245445748Smckusick entry->max_protection, 245545748Smckusick inheritance_name[entry->inheritance]); 245645748Smckusick if (entry->wired_count != 0) 245745748Smckusick printf("wired, "); 245845748Smckusick } 245945748Smckusick 246045748Smckusick if (entry->is_a_map || entry->is_sub_map) { 246145748Smckusick printf("share=0x%x, offset=0x%x\n", 246245748Smckusick (int) entry->object.share_map, 246345748Smckusick (int) entry->offset); 246445748Smckusick if ((entry->prev == &map->header) || 246545748Smckusick (!entry->prev->is_a_map) || 246645748Smckusick (entry->prev->object.share_map != 246745748Smckusick entry->object.share_map)) { 246845748Smckusick indent += 2; 246945748Smckusick vm_map_print(entry->object.share_map, full); 247045748Smckusick indent -= 2; 247145748Smckusick } 247245748Smckusick 247345748Smckusick } 247445748Smckusick else { 247545748Smckusick printf("object=0x%x, offset=0x%x", 247645748Smckusick (int) entry->object.vm_object, 247745748Smckusick (int) entry->offset); 247845748Smckusick if (entry->copy_on_write) 247945748Smckusick printf(", copy (%s)", 248045748Smckusick entry->needs_copy ? "needed" : "done"); 248145748Smckusick printf("\n"); 248245748Smckusick 248345748Smckusick if ((entry->prev == &map->header) || 248445748Smckusick (entry->prev->is_a_map) || 248545748Smckusick (entry->prev->object.vm_object != 248645748Smckusick entry->object.vm_object)) { 248745748Smckusick indent += 2; 248845748Smckusick vm_object_print(entry->object.vm_object, full); 248945748Smckusick indent -= 2; 249045748Smckusick } 249145748Smckusick } 249245748Smckusick } 249345748Smckusick indent -= 2; 249445748Smckusick } 2495