145748Smckusick /* 263379Sbostic * Copyright (c) 1991, 1993 363379Sbostic * The Regents of the University of California. All rights reserved. 445748Smckusick * 545748Smckusick * This code is derived from software contributed to Berkeley by 645748Smckusick * The Mach Operating System project at Carnegie-Mellon University. 745748Smckusick * 848493Smckusick * %sccs.include.redist.c% 945748Smckusick * 10*68162Scgd * @(#)vm_map.c 8.4 (Berkeley) 01/09/95 1148493Smckusick * 1248493Smckusick * 1348493Smckusick * Copyright (c) 1987, 1990 Carnegie-Mellon University. 1448493Smckusick * All rights reserved. 1548493Smckusick * 1648493Smckusick * Authors: Avadis Tevanian, Jr., Michael Wayne Young 1748493Smckusick * 1848493Smckusick * Permission to use, copy, modify and distribute this software and 1948493Smckusick * its documentation is hereby granted, provided that both the copyright 2048493Smckusick * notice and this permission notice appear in all copies of the 2148493Smckusick * software, derivative works or modified versions, and any portions 2248493Smckusick * thereof, and that both notices appear in supporting documentation. 2348493Smckusick * 2448493Smckusick * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 2548493Smckusick * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 2648493Smckusick * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 2748493Smckusick * 2848493Smckusick * Carnegie Mellon requests users of this software to return to 2948493Smckusick * 3048493Smckusick * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 3148493Smckusick * School of Computer Science 3248493Smckusick * Carnegie Mellon University 3348493Smckusick * Pittsburgh PA 15213-3890 3448493Smckusick * 3548493Smckusick * any improvements or extensions that they make and grant Carnegie the 3648493Smckusick * rights to redistribute these changes. 3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * Virtual memory mapping module. 4145748Smckusick */ 4245748Smckusick 4353357Sbostic #include <sys/param.h> 4453357Sbostic #include <sys/systm.h> 4553357Sbostic #include <sys/malloc.h> 4645748Smckusick 4753357Sbostic #include <vm/vm.h> 4853357Sbostic #include <vm/vm_page.h> 4953357Sbostic #include <vm/vm_object.h> 5053357Sbostic 5145748Smckusick /* 5245748Smckusick * Virtual memory maps provide for the mapping, protection, 5345748Smckusick * and sharing of virtual memory objects. In addition, 5445748Smckusick * this module provides for an efficient virtual copy of 5545748Smckusick * memory from one map to another. 5645748Smckusick * 5745748Smckusick * Synchronization is required prior to most operations. 5845748Smckusick * 5945748Smckusick * Maps consist of an ordered doubly-linked list of simple 6045748Smckusick * entries; a single hint is used to speed up lookups. 6145748Smckusick * 6245748Smckusick * In order to properly represent the sharing of virtual 6345748Smckusick * memory regions among maps, the map structure is bi-level. 6445748Smckusick * Top-level ("address") maps refer to regions of sharable 6545748Smckusick * virtual memory. These regions are implemented as 6645748Smckusick * ("sharing") maps, which then refer to the actual virtual 6745748Smckusick * memory objects. When two address maps "share" memory, 6845748Smckusick * their top-level maps both have references to the same 6945748Smckusick * sharing map. 
When memory is virtual-copied from one
 *	address map to another, the references in the sharing
 *	maps are actually copied -- no copying occurs at the
 *	virtual memory object level.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one sharing map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map -- this means that
 *	shadow object creation can be delayed until a write operation
 *	occurs.
 */

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
vm_map_entry_t	kentry_free;
vm_map_t	kmap_free;

static void	_vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void	_vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));

void
vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries
	 * with the rest.
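	 * (As the arithmetic below shows, the first MAX_KMAP *
	 * sizeof(struct vm_map) bytes of kentry_data hold the static
	 * maps, and whatever space remains is carved into
	 * struct vm_map_entry slots.)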
13945748Smckusick */ 14045748Smckusick kentry_free = mep = (vm_map_entry_t) mp; 14145748Smckusick i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep; 14245748Smckusick while (--i > 0) { 14345748Smckusick mep->next = mep + 1; 14445748Smckusick mep++; 14545748Smckusick } 14648383Skarels mep->next = NULL; 14745748Smckusick } 14845748Smckusick 14945748Smckusick /* 15048383Skarels * Allocate a vmspace structure, including a vm_map and pmap, 15148383Skarels * and initialize those structures. The refcnt is set to 1. 15248383Skarels * The remaining fields must be initialized by the caller. 15348383Skarels */ 15448383Skarels struct vmspace * 15548383Skarels vmspace_alloc(min, max, pageable) 15648383Skarels vm_offset_t min, max; 15748383Skarels int pageable; 15848383Skarels { 15948383Skarels register struct vmspace *vm; 16048383Skarels 16148383Skarels MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK); 16248383Skarels bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm); 16348383Skarels vm_map_init(&vm->vm_map, min, max, pageable); 16448383Skarels pmap_pinit(&vm->vm_pmap); 16548383Skarels vm->vm_map.pmap = &vm->vm_pmap; /* XXX */ 16648383Skarels vm->vm_refcnt = 1; 16748383Skarels return (vm); 16848383Skarels } 16948383Skarels 17048383Skarels void 17148383Skarels vmspace_free(vm) 17248383Skarels register struct vmspace *vm; 17348383Skarels { 17448383Skarels 17548383Skarels if (--vm->vm_refcnt == 0) { 17648383Skarels /* 17748383Skarels * Lock the map, to wait out all other references to it. 17848383Skarels * Delete all of the mappings and pages they hold, 17948383Skarels * then call the pmap module to reclaim anything left. 18048383Skarels */ 18148383Skarels vm_map_lock(&vm->vm_map); 18248383Skarels (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 18348383Skarels vm->vm_map.max_offset); 18448383Skarels pmap_release(&vm->vm_pmap); 18548383Skarels FREE(vm, M_VMMAP); 18648383Skarels } 18748383Skarels } 18848383Skarels 18948383Skarels /* 19045748Smckusick * vm_map_create: 19145748Smckusick * 19245748Smckusick * Creates and returns a new empty VM map with 19345748Smckusick * the given physical map structure, and having 19445748Smckusick * the given lower and upper address bounds. 19545748Smckusick */ 196*68162Scgd vm_map_t 197*68162Scgd vm_map_create(pmap, min, max, pageable) 19845748Smckusick pmap_t pmap; 19945748Smckusick vm_offset_t min, max; 20045748Smckusick boolean_t pageable; 20145748Smckusick { 20245748Smckusick register vm_map_t result; 20365479Sbostic extern vm_map_t kmem_map; 20445748Smckusick 20548383Skarels if (kmem_map == NULL) { 20645748Smckusick result = kmap_free; 20748383Skarels if (result == NULL) 20848383Skarels panic("vm_map_create: out of maps"); 209*68162Scgd kmap_free = (vm_map_t) result->header.next; 21045748Smckusick } else 21145748Smckusick MALLOC(result, vm_map_t, sizeof(struct vm_map), 21245748Smckusick M_VMMAP, M_WAITOK); 21345748Smckusick 21448383Skarels vm_map_init(result, min, max, pageable); 21545748Smckusick result->pmap = pmap; 21645748Smckusick return(result); 21745748Smckusick } 21845748Smckusick 21945748Smckusick /* 22048383Skarels * Initialize an existing vm_map structure 22148383Skarels * such as that in the vmspace structure. 22248383Skarels * The pmap is set elsewhere. 
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lock_init(&map->lock, TRUE);
	simple_lock_init(&map->ref_lock);
	simple_lock_init(&map->hint_lock);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.  This routine is
 *	responsible only for the allocation itself; the
 *	caller fills in the fields.
 */
vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t	map;
{
	vm_map_entry_t	entry;
#ifdef DEBUG
	extern vm_map_t		kernel_map, kmem_map, mb_map, pager_map;
	boolean_t		isspecial;

	isspecial = (map == kernel_map || map == kmem_map ||
		     map == mb_map || map == pager_map);
	if (isspecial && map->entries_pageable ||
	    !isspecial && !map->entries_pageable)
		panic("vm_map_entry_create: bogus map");
#endif
	if (map->entries_pageable) {
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		       M_VMMAPENT, M_WAITOK);
	} else {
		if (entry = kentry_free)
			kentry_free = kentry_free->next;
	}
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
void
vm_map_entry_dispose(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
#ifdef DEBUG
	extern vm_map_t		kernel_map, kmem_map, mb_map, pager_map;
	boolean_t		isspecial;

	isspecial = (map == kernel_map || map == kmem_map ||
		     map == mb_map || map == pager_map);
	if (isspecial && map->entries_pageable ||
	    !isspecial && !map->entries_pageable)
		panic("vm_map_entry_dispose: bogus map");
#endif
	if (map->entries_pageable) {
		FREE(entry, M_VMMAPENT);
	} else {
		entry->next = kentry_free;
		kentry_free = entry;
	}
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
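 *
 *	The entries form a circular, doubly-linked list whose sentinel
 *	is &map->header; vm_map_entry_link() places the new entry
 *	immediately after "after_where" and increments nentries, and
 *	vm_map_entry_unlink() does the reverse.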
31245748Smckusick */ 31345748Smckusick #define vm_map_entry_link(map, after_where, entry) \ 31445748Smckusick { \ 31545748Smckusick (map)->nentries++; \ 31645748Smckusick (entry)->prev = (after_where); \ 31745748Smckusick (entry)->next = (after_where)->next; \ 31845748Smckusick (entry)->prev->next = (entry); \ 31945748Smckusick (entry)->next->prev = (entry); \ 32045748Smckusick } 32145748Smckusick #define vm_map_entry_unlink(map, entry) \ 32245748Smckusick { \ 32345748Smckusick (map)->nentries--; \ 32445748Smckusick (entry)->next->prev = (entry)->prev; \ 32545748Smckusick (entry)->prev->next = (entry)->next; \ 32645748Smckusick } 32745748Smckusick 32845748Smckusick /* 32945748Smckusick * vm_map_reference: 33045748Smckusick * 33145748Smckusick * Creates another valid reference to the given map. 33245748Smckusick * 33345748Smckusick */ 334*68162Scgd void 335*68162Scgd vm_map_reference(map) 33645748Smckusick register vm_map_t map; 33745748Smckusick { 33848383Skarels if (map == NULL) 33945748Smckusick return; 34045748Smckusick 34145748Smckusick simple_lock(&map->ref_lock); 34245748Smckusick map->ref_count++; 34345748Smckusick simple_unlock(&map->ref_lock); 34445748Smckusick } 34545748Smckusick 34645748Smckusick /* 34745748Smckusick * vm_map_deallocate: 34845748Smckusick * 34945748Smckusick * Removes a reference from the specified map, 35045748Smckusick * destroying it if no references remain. 35145748Smckusick * The map should not be locked. 35245748Smckusick */ 353*68162Scgd void 354*68162Scgd vm_map_deallocate(map) 35545748Smckusick register vm_map_t map; 35645748Smckusick { 35745748Smckusick register int c; 35845748Smckusick 35948383Skarels if (map == NULL) 36045748Smckusick return; 36145748Smckusick 36245748Smckusick simple_lock(&map->ref_lock); 36345748Smckusick c = --map->ref_count; 36445748Smckusick simple_unlock(&map->ref_lock); 36545748Smckusick 36645748Smckusick if (c > 0) { 36745748Smckusick return; 36845748Smckusick } 36945748Smckusick 37045748Smckusick /* 37145748Smckusick * Lock the map, to wait out all other references 37245748Smckusick * to it. 37345748Smckusick */ 37445748Smckusick 37545748Smckusick vm_map_lock(map); 37645748Smckusick 37745748Smckusick (void) vm_map_delete(map, map->min_offset, map->max_offset); 37845748Smckusick 37945748Smckusick pmap_destroy(map->pmap); 38045748Smckusick 38145748Smckusick FREE(map, M_VMMAP); 38245748Smckusick } 38345748Smckusick 38445748Smckusick /* 38565686Shibler * vm_map_insert: 38645748Smckusick * 38745748Smckusick * Inserts the given whole VM object into the target 38845748Smckusick * map at the specified address range. The object's 38945748Smckusick * size should match that of the address range. 39045748Smckusick * 39145748Smckusick * Requires that the map be locked, and leaves it so. 39245748Smckusick */ 39353357Sbostic int 39445748Smckusick vm_map_insert(map, object, offset, start, end) 39545748Smckusick vm_map_t map; 39645748Smckusick vm_object_t object; 39745748Smckusick vm_offset_t offset; 39845748Smckusick vm_offset_t start; 39945748Smckusick vm_offset_t end; 40045748Smckusick { 40145748Smckusick register vm_map_entry_t new_entry; 40245748Smckusick register vm_map_entry_t prev_entry; 40345748Smckusick vm_map_entry_t temp_entry; 40445748Smckusick 40545748Smckusick /* 40645748Smckusick * Check that the start and end points are not bogus. 
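	 * (That is: start may not lie below map->min_offset, end may not
	 * lie above map->max_offset, and start must be strictly less
	 * than end.)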
40745748Smckusick */ 40845748Smckusick 40945748Smckusick if ((start < map->min_offset) || (end > map->max_offset) || 41045748Smckusick (start >= end)) 41145748Smckusick return(KERN_INVALID_ADDRESS); 41245748Smckusick 41345748Smckusick /* 41445748Smckusick * Find the entry prior to the proposed 41545748Smckusick * starting address; if it's part of an 41645748Smckusick * existing entry, this range is bogus. 41745748Smckusick */ 41845748Smckusick 41945748Smckusick if (vm_map_lookup_entry(map, start, &temp_entry)) 42045748Smckusick return(KERN_NO_SPACE); 42145748Smckusick 42245748Smckusick prev_entry = temp_entry; 42345748Smckusick 42445748Smckusick /* 42545748Smckusick * Assert that the next entry doesn't overlap the 42645748Smckusick * end point. 42745748Smckusick */ 42845748Smckusick 42945748Smckusick if ((prev_entry->next != &map->header) && 43045748Smckusick (prev_entry->next->start < end)) 43145748Smckusick return(KERN_NO_SPACE); 43245748Smckusick 43345748Smckusick /* 43445748Smckusick * See if we can avoid creating a new entry by 43545748Smckusick * extending one of our neighbors. 43645748Smckusick */ 43745748Smckusick 43848383Skarels if (object == NULL) { 43945748Smckusick if ((prev_entry != &map->header) && 44045748Smckusick (prev_entry->end == start) && 44145748Smckusick (map->is_main_map) && 44245748Smckusick (prev_entry->is_a_map == FALSE) && 44345748Smckusick (prev_entry->is_sub_map == FALSE) && 44445748Smckusick (prev_entry->inheritance == VM_INHERIT_DEFAULT) && 44545748Smckusick (prev_entry->protection == VM_PROT_DEFAULT) && 44645748Smckusick (prev_entry->max_protection == VM_PROT_DEFAULT) && 44745748Smckusick (prev_entry->wired_count == 0)) { 44845748Smckusick 44945748Smckusick if (vm_object_coalesce(prev_entry->object.vm_object, 45048383Skarels NULL, 45145748Smckusick prev_entry->offset, 45245748Smckusick (vm_offset_t) 0, 45345748Smckusick (vm_size_t)(prev_entry->end 45445748Smckusick - prev_entry->start), 45545748Smckusick (vm_size_t)(end - prev_entry->end))) { 45645748Smckusick /* 45745748Smckusick * Coalesced the two objects - can extend 45845748Smckusick * the previous map entry to include the 45945748Smckusick * new range. 
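				 *	(No new entry or object reference
				 *	is needed; only the map size and
				 *	prev_entry->end are updated below.)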
46045748Smckusick */ 46145748Smckusick map->size += (end - prev_entry->end); 46245748Smckusick prev_entry->end = end; 46345748Smckusick return(KERN_SUCCESS); 46445748Smckusick } 46545748Smckusick } 46645748Smckusick } 46745748Smckusick 46845748Smckusick /* 46945748Smckusick * Create a new entry 47045748Smckusick */ 47145748Smckusick 47245748Smckusick new_entry = vm_map_entry_create(map); 47345748Smckusick new_entry->start = start; 47445748Smckusick new_entry->end = end; 47545748Smckusick 47645748Smckusick new_entry->is_a_map = FALSE; 47745748Smckusick new_entry->is_sub_map = FALSE; 47845748Smckusick new_entry->object.vm_object = object; 47945748Smckusick new_entry->offset = offset; 48045748Smckusick 48145748Smckusick new_entry->copy_on_write = FALSE; 48245748Smckusick new_entry->needs_copy = FALSE; 48345748Smckusick 48445748Smckusick if (map->is_main_map) { 48545748Smckusick new_entry->inheritance = VM_INHERIT_DEFAULT; 48645748Smckusick new_entry->protection = VM_PROT_DEFAULT; 48745748Smckusick new_entry->max_protection = VM_PROT_DEFAULT; 48845748Smckusick new_entry->wired_count = 0; 48945748Smckusick } 49045748Smckusick 49145748Smckusick /* 49245748Smckusick * Insert the new entry into the list 49345748Smckusick */ 49445748Smckusick 49545748Smckusick vm_map_entry_link(map, prev_entry, new_entry); 49645748Smckusick map->size += new_entry->end - new_entry->start; 49745748Smckusick 49845748Smckusick /* 49945748Smckusick * Update the free space hint 50045748Smckusick */ 50145748Smckusick 50245748Smckusick if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start)) 50345748Smckusick map->first_free = new_entry; 50445748Smckusick 50545748Smckusick return(KERN_SUCCESS); 50645748Smckusick } 50745748Smckusick 50845748Smckusick /* 50945748Smckusick * SAVE_HINT: 51045748Smckusick * 51145748Smckusick * Saves the specified entry as the hint for 51245748Smckusick * future lookups. Performs necessary interlocks. 51345748Smckusick */ 51445748Smckusick #define SAVE_HINT(map,value) \ 51545748Smckusick simple_lock(&(map)->hint_lock); \ 51645748Smckusick (map)->hint = (value); \ 51745748Smckusick simple_unlock(&(map)->hint_lock); 51845748Smckusick 51945748Smckusick /* 52045748Smckusick * vm_map_lookup_entry: [ internal use only ] 52145748Smckusick * 52245748Smckusick * Finds the map entry containing (or 52345748Smckusick * immediately preceding) the specified address 52445748Smckusick * in the given map; the entry is returned 52545748Smckusick * in the "entry" parameter. The boolean 52645748Smckusick * result indicates whether the address is 52745748Smckusick * actually contained in the map. 52845748Smckusick */ 529*68162Scgd boolean_t 530*68162Scgd vm_map_lookup_entry(map, address, entry) 53145748Smckusick register vm_map_t map; 53245748Smckusick register vm_offset_t address; 53345748Smckusick vm_map_entry_t *entry; /* OUT */ 53445748Smckusick { 53545748Smckusick register vm_map_entry_t cur; 53645748Smckusick register vm_map_entry_t last; 53745748Smckusick 53845748Smckusick /* 53945748Smckusick * Start looking either from the head of the 54045748Smckusick * list, or from the hint. 54145748Smckusick */ 54245748Smckusick 54345748Smckusick simple_lock(&map->hint_lock); 54445748Smckusick cur = map->hint; 54545748Smckusick simple_unlock(&map->hint_lock); 54645748Smckusick 54745748Smckusick if (cur == &map->header) 54845748Smckusick cur = cur->next; 54945748Smckusick 55045748Smckusick if (address >= cur->start) { 55145748Smckusick /* 55245748Smckusick * Go from hint to end of list. 
55345748Smckusick * 55445748Smckusick * But first, make a quick check to see if 55545748Smckusick * we are already looking at the entry we 55645748Smckusick * want (which is usually the case). 55745748Smckusick * Note also that we don't need to save the hint 55845748Smckusick * here... it is the same hint (unless we are 55945748Smckusick * at the header, in which case the hint didn't 56045748Smckusick * buy us anything anyway). 56145748Smckusick */ 56245748Smckusick last = &map->header; 56345748Smckusick if ((cur != last) && (cur->end > address)) { 56445748Smckusick *entry = cur; 56545748Smckusick return(TRUE); 56645748Smckusick } 56745748Smckusick } 56845748Smckusick else { 56945748Smckusick /* 57045748Smckusick * Go from start to hint, *inclusively* 57145748Smckusick */ 57245748Smckusick last = cur->next; 57345748Smckusick cur = map->header.next; 57445748Smckusick } 57545748Smckusick 57645748Smckusick /* 57745748Smckusick * Search linearly 57845748Smckusick */ 57945748Smckusick 58045748Smckusick while (cur != last) { 58145748Smckusick if (cur->end > address) { 58245748Smckusick if (address >= cur->start) { 58345748Smckusick /* 58445748Smckusick * Save this lookup for future 58545748Smckusick * hints, and return 58645748Smckusick */ 58745748Smckusick 58845748Smckusick *entry = cur; 58945748Smckusick SAVE_HINT(map, cur); 59045748Smckusick return(TRUE); 59145748Smckusick } 59245748Smckusick break; 59345748Smckusick } 59445748Smckusick cur = cur->next; 59545748Smckusick } 59645748Smckusick *entry = cur->prev; 59745748Smckusick SAVE_HINT(map, *entry); 59845748Smckusick return(FALSE); 59945748Smckusick } 60045748Smckusick 60145748Smckusick /* 60252610Storek * Find sufficient space for `length' bytes in the given map, starting at 60352610Storek * `start'. The map must be locked. Returns 0 on success, 1 on no space. 60452610Storek */ 60552610Storek int 60652610Storek vm_map_findspace(map, start, length, addr) 60752610Storek register vm_map_t map; 60852610Storek register vm_offset_t start; 60952610Storek vm_size_t length; 61052610Storek vm_offset_t *addr; 61152610Storek { 61252610Storek register vm_map_entry_t entry, next; 61352610Storek register vm_offset_t end; 61452610Storek 61552610Storek if (start < map->min_offset) 61652610Storek start = map->min_offset; 61752610Storek if (start > map->max_offset) 61852610Storek return (1); 61952610Storek 62052610Storek /* 62152610Storek * Look for the first possible address; if there's already 62252610Storek * something at this address, we have to start after it. 62352610Storek */ 62452610Storek if (start == map->min_offset) { 62552610Storek if ((entry = map->first_free) != &map->header) 62652610Storek start = entry->end; 62752610Storek } else { 62852610Storek vm_map_entry_t tmp; 62952610Storek if (vm_map_lookup_entry(map, start, &tmp)) 63052610Storek start = tmp->end; 63152610Storek entry = tmp; 63252610Storek } 63352610Storek 63452610Storek /* 63552610Storek * Look through the rest of the map, trying to fit a new region in 63652610Storek * the gap between existing regions, or after the very last region. 63752610Storek */ 63852610Storek for (;; start = (entry = next)->end) { 63952610Storek /* 64052610Storek * Find the end of the proposed new region. Be sure we didn't 64152610Storek * go beyond the end of the map, or wrap around the address; 64252610Storek * if so, we lose. Otherwise, if this is the last entry, or 64352610Storek * if the proposed new region fits before the next entry, we 64452610Storek * win. 
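		 * An illustrative case: when the scan stands at the gap
		 * beginning at entry->end, the candidate region is
		 * [start, start + length); it is accepted when the next
		 * entry either is the header or begins at or beyond
		 * start + length, and otherwise the scan resumes at
		 * next->end.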
64552610Storek */ 64652610Storek end = start + length; 64752610Storek if (end > map->max_offset || end < start) 64852610Storek return (1); 64952610Storek next = entry->next; 65052610Storek if (next == &map->header || next->start >= end) 65152610Storek break; 65252610Storek } 65352610Storek SAVE_HINT(map, entry); 65452610Storek *addr = start; 65552610Storek return (0); 65652610Storek } 65752610Storek 65852610Storek /* 65945748Smckusick * vm_map_find finds an unallocated region in the target address 66045748Smckusick * map with the given length. The search is defined to be 66145748Smckusick * first-fit from the specified address; the region found is 66245748Smckusick * returned in the same parameter. 66345748Smckusick * 66445748Smckusick */ 66553357Sbostic int 66645748Smckusick vm_map_find(map, object, offset, addr, length, find_space) 66745748Smckusick vm_map_t map; 66845748Smckusick vm_object_t object; 66945748Smckusick vm_offset_t offset; 67045748Smckusick vm_offset_t *addr; /* IN/OUT */ 67145748Smckusick vm_size_t length; 67245748Smckusick boolean_t find_space; 67345748Smckusick { 67445748Smckusick register vm_offset_t start; 67545748Smckusick int result; 67645748Smckusick 67745748Smckusick start = *addr; 67845748Smckusick vm_map_lock(map); 67945748Smckusick if (find_space) { 68052610Storek if (vm_map_findspace(map, start, length, addr)) { 68145748Smckusick vm_map_unlock(map); 68245748Smckusick return (KERN_NO_SPACE); 68345748Smckusick } 68452610Storek start = *addr; 68545748Smckusick } 68645748Smckusick result = vm_map_insert(map, object, offset, start, start + length); 68745748Smckusick vm_map_unlock(map); 68852610Storek return (result); 68945748Smckusick } 69045748Smckusick 69145748Smckusick /* 69245748Smckusick * vm_map_simplify_entry: [ internal use only ] 69345748Smckusick * 69445748Smckusick * Simplify the given map entry by: 69545748Smckusick * removing extra sharing maps 69645748Smckusick * [XXX maybe later] merging with a neighbor 69745748Smckusick */ 698*68162Scgd void 699*68162Scgd vm_map_simplify_entry(map, entry) 70045748Smckusick vm_map_t map; 70145748Smckusick vm_map_entry_t entry; 70245748Smckusick { 70345748Smckusick #ifdef lint 70445748Smckusick map++; 70560345Storek #endif 70645748Smckusick 70745748Smckusick /* 70845748Smckusick * If this entry corresponds to a sharing map, then 70945748Smckusick * see if we can remove the level of indirection. 71045748Smckusick * If it's not a sharing map, then it points to 71145748Smckusick * a VM object, so see if we can merge with either 71245748Smckusick * of our neighbors. 71345748Smckusick */ 71445748Smckusick 71545748Smckusick if (entry->is_sub_map) 71645748Smckusick return; 71745748Smckusick if (entry->is_a_map) { 71845748Smckusick #if 0 71945748Smckusick vm_map_t my_share_map; 72045748Smckusick int count; 72145748Smckusick 72245748Smckusick my_share_map = entry->object.share_map; 72345748Smckusick simple_lock(&my_share_map->ref_lock); 72445748Smckusick count = my_share_map->ref_count; 72545748Smckusick simple_unlock(&my_share_map->ref_lock); 72645748Smckusick 72745748Smckusick if (count == 1) { 72845748Smckusick /* Can move the region from 72945748Smckusick * entry->start to entry->end (+ entry->offset) 73045748Smckusick * in my_share_map into place of entry. 73145748Smckusick * Later. 73245748Smckusick */ 73345748Smckusick } 73460345Storek #endif 73545748Smckusick } 73645748Smckusick else { 73745748Smckusick /* 73845748Smckusick * Try to merge with our neighbors. 
73945748Smckusick * 74045748Smckusick * Conditions for merge are: 74145748Smckusick * 74245748Smckusick * 1. entries are adjacent. 74345748Smckusick * 2. both entries point to objects 74445748Smckusick * with null pagers. 74545748Smckusick * 74645748Smckusick * If a merge is possible, we replace the two 74745748Smckusick * entries with a single entry, then merge 74845748Smckusick * the two objects into a single object. 74945748Smckusick * 75045748Smckusick * Now, all that is left to do is write the 75145748Smckusick * code! 75245748Smckusick */ 75345748Smckusick } 75445748Smckusick } 75545748Smckusick 75645748Smckusick /* 75745748Smckusick * vm_map_clip_start: [ internal use only ] 75845748Smckusick * 75945748Smckusick * Asserts that the given entry begins at or after 76045748Smckusick * the specified address; if necessary, 76145748Smckusick * it splits the entry into two. 76245748Smckusick */ 76345748Smckusick #define vm_map_clip_start(map, entry, startaddr) \ 76445748Smckusick { \ 76545748Smckusick if (startaddr > entry->start) \ 76645748Smckusick _vm_map_clip_start(map, entry, startaddr); \ 76745748Smckusick } 76845748Smckusick 76945748Smckusick /* 77045748Smckusick * This routine is called only when it is known that 77145748Smckusick * the entry must be split. 77245748Smckusick */ 773*68162Scgd static void 774*68162Scgd _vm_map_clip_start(map, entry, start) 77545748Smckusick register vm_map_t map; 77645748Smckusick register vm_map_entry_t entry; 77745748Smckusick register vm_offset_t start; 77845748Smckusick { 77945748Smckusick register vm_map_entry_t new_entry; 78045748Smckusick 78145748Smckusick /* 78245748Smckusick * See if we can simplify this entry first 78345748Smckusick */ 78445748Smckusick 78545748Smckusick vm_map_simplify_entry(map, entry); 78645748Smckusick 78745748Smckusick /* 78845748Smckusick * Split off the front portion -- 78945748Smckusick * note that we must insert the new 79045748Smckusick * entry BEFORE this one, so that 79145748Smckusick * this entry has the specified starting 79245748Smckusick * address. 79345748Smckusick */ 79445748Smckusick 79545748Smckusick new_entry = vm_map_entry_create(map); 79645748Smckusick *new_entry = *entry; 79745748Smckusick 79845748Smckusick new_entry->end = start; 79945748Smckusick entry->offset += (start - entry->start); 80045748Smckusick entry->start = start; 80145748Smckusick 80245748Smckusick vm_map_entry_link(map, entry->prev, new_entry); 80345748Smckusick 80445748Smckusick if (entry->is_a_map || entry->is_sub_map) 80545748Smckusick vm_map_reference(new_entry->object.share_map); 80645748Smckusick else 80745748Smckusick vm_object_reference(new_entry->object.vm_object); 80845748Smckusick } 80945748Smckusick 81045748Smckusick /* 81145748Smckusick * vm_map_clip_end: [ internal use only ] 81245748Smckusick * 81345748Smckusick * Asserts that the given entry ends at or before 81445748Smckusick * the specified address; if necessary, 81545748Smckusick * it splits the entry into two. 81645748Smckusick */ 81745748Smckusick 81845748Smckusick #define vm_map_clip_end(map, entry, endaddr) \ 81945748Smckusick { \ 82045748Smckusick if (endaddr < entry->end) \ 82145748Smckusick _vm_map_clip_end(map, entry, endaddr); \ 82245748Smckusick } 82345748Smckusick 82445748Smckusick /* 82545748Smckusick * This routine is called only when it is known that 82645748Smckusick * the entry must be split. 
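 *
 *	(Splitting at "end" leaves the original entry covering
 *	[entry->start, end) and creates a new entry covering
 *	[end, old entry->end), whose object offset is advanced by
 *	(end - entry->start); the new entry is linked in directly
 *	after the old one.)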
 */
static void
_vm_map_clip_end(map, entry, end)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
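 *
 *	A sketch of the expected calling sequence (illustrative only, not
 *	taken from this file; "parent", "min", "size" and "submap" are
 *	placeholder names and error handling is omitted):
 *
 *		min = vm_map_min(parent);
 *		(void) vm_map_find(parent, NULL, (vm_offset_t)0, &min,
 *		    size, TRUE);
 *		submap = vm_map_create(vm_map_pmap(parent), min, min + size,
 *		    TRUE);
 *		(void) vm_map_submap(parent, min, min + size, submap);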
88845748Smckusick */ 88953357Sbostic int 89045748Smckusick vm_map_submap(map, start, end, submap) 89145748Smckusick register vm_map_t map; 89245748Smckusick register vm_offset_t start; 89345748Smckusick register vm_offset_t end; 89445748Smckusick vm_map_t submap; 89545748Smckusick { 89645748Smckusick vm_map_entry_t entry; 89745748Smckusick register int result = KERN_INVALID_ARGUMENT; 89845748Smckusick 89945748Smckusick vm_map_lock(map); 90045748Smckusick 90145748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 90245748Smckusick 90345748Smckusick if (vm_map_lookup_entry(map, start, &entry)) { 90445748Smckusick vm_map_clip_start(map, entry, start); 90545748Smckusick } 90645748Smckusick else 90745748Smckusick entry = entry->next; 90845748Smckusick 90945748Smckusick vm_map_clip_end(map, entry, end); 91045748Smckusick 91145748Smckusick if ((entry->start == start) && (entry->end == end) && 91245748Smckusick (!entry->is_a_map) && 91348383Skarels (entry->object.vm_object == NULL) && 91445748Smckusick (!entry->copy_on_write)) { 91545748Smckusick entry->is_a_map = FALSE; 91645748Smckusick entry->is_sub_map = TRUE; 91745748Smckusick vm_map_reference(entry->object.sub_map = submap); 91845748Smckusick result = KERN_SUCCESS; 91945748Smckusick } 92045748Smckusick vm_map_unlock(map); 92145748Smckusick 92245748Smckusick return(result); 92345748Smckusick } 92445748Smckusick 92545748Smckusick /* 92645748Smckusick * vm_map_protect: 92745748Smckusick * 92845748Smckusick * Sets the protection of the specified address 92945748Smckusick * region in the target map. If "set_max" is 93045748Smckusick * specified, the maximum protection is to be set; 93145748Smckusick * otherwise, only the current protection is affected. 93245748Smckusick */ 93353357Sbostic int 93445748Smckusick vm_map_protect(map, start, end, new_prot, set_max) 93545748Smckusick register vm_map_t map; 93645748Smckusick register vm_offset_t start; 93745748Smckusick register vm_offset_t end; 93845748Smckusick register vm_prot_t new_prot; 93945748Smckusick register boolean_t set_max; 94045748Smckusick { 94145748Smckusick register vm_map_entry_t current; 94245748Smckusick vm_map_entry_t entry; 94345748Smckusick 94445748Smckusick vm_map_lock(map); 94545748Smckusick 94645748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 94745748Smckusick 94845748Smckusick if (vm_map_lookup_entry(map, start, &entry)) { 94945748Smckusick vm_map_clip_start(map, entry, start); 95045748Smckusick } 95145748Smckusick else 95245748Smckusick entry = entry->next; 95345748Smckusick 95445748Smckusick /* 95545748Smckusick * Make a first pass to check for protection 95645748Smckusick * violations. 95745748Smckusick */ 95845748Smckusick 95945748Smckusick current = entry; 96045748Smckusick while ((current != &map->header) && (current->start < end)) { 96145748Smckusick if (current->is_sub_map) 96245748Smckusick return(KERN_INVALID_ARGUMENT); 96345748Smckusick if ((new_prot & current->max_protection) != new_prot) { 96445748Smckusick vm_map_unlock(map); 96545748Smckusick return(KERN_PROTECTION_FAILURE); 96645748Smckusick } 96745748Smckusick 96845748Smckusick current = current->next; 96945748Smckusick } 97045748Smckusick 97145748Smckusick /* 97245748Smckusick * Go back and fix up protections. 97345748Smckusick * [Note that clipping is not necessary the second time.] 
 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
				(current->max_protection = new_prot) &
					old_prot;
		else
			current->protection = new_prot;

		/*
		 *	Update physical map if necessary.
		 *	Worry about copy-on-write here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

#define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define	max(a,b)	((a) > (b) ? (a) : (b))

			if (current->is_a_map) {
				vm_map_entry_t	share_entry;
				vm_offset_t	share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
						current->object.share_map,
						current->offset,
						&share_entry);
				share_end = current->offset +
					(current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
					(share_entry->start < share_end)) {

					pmap_protect(map->pmap,
						(max(share_entry->start,
							current->offset) -
							current->offset +
							current->start),
						min(share_entry->end,
							share_end) -
							current->offset +
							current->start,
						current->protection &
							MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			}
			else
				pmap_protect(map->pmap, current->start,
					current->end,
					current->protection & MASK(current));
#undef	max
#undef	MASK
		}
		current = current->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
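 *
 *	For example (an illustrative call, not taken from this file),
 *	marking a region so that children created by vm_map_fork continue
 *	to share it:
 *
 *		(void) vm_map_inherit(map, start, end, VM_INHERIT_SHARE);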
105445748Smckusick */ 105553357Sbostic int 105645748Smckusick vm_map_inherit(map, start, end, new_inheritance) 105745748Smckusick register vm_map_t map; 105845748Smckusick register vm_offset_t start; 105945748Smckusick register vm_offset_t end; 106045748Smckusick register vm_inherit_t new_inheritance; 106145748Smckusick { 106245748Smckusick register vm_map_entry_t entry; 106345748Smckusick vm_map_entry_t temp_entry; 106445748Smckusick 106545748Smckusick switch (new_inheritance) { 106645748Smckusick case VM_INHERIT_NONE: 106745748Smckusick case VM_INHERIT_COPY: 106845748Smckusick case VM_INHERIT_SHARE: 106945748Smckusick break; 107045748Smckusick default: 107145748Smckusick return(KERN_INVALID_ARGUMENT); 107245748Smckusick } 107345748Smckusick 107445748Smckusick vm_map_lock(map); 107545748Smckusick 107645748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 107745748Smckusick 107845748Smckusick if (vm_map_lookup_entry(map, start, &temp_entry)) { 107945748Smckusick entry = temp_entry; 108045748Smckusick vm_map_clip_start(map, entry, start); 108145748Smckusick } 108245748Smckusick else 108345748Smckusick entry = temp_entry->next; 108445748Smckusick 108545748Smckusick while ((entry != &map->header) && (entry->start < end)) { 108645748Smckusick vm_map_clip_end(map, entry, end); 108745748Smckusick 108845748Smckusick entry->inheritance = new_inheritance; 108945748Smckusick 109045748Smckusick entry = entry->next; 109145748Smckusick } 109245748Smckusick 109345748Smckusick vm_map_unlock(map); 109445748Smckusick return(KERN_SUCCESS); 109545748Smckusick } 109645748Smckusick 109745748Smckusick /* 109845748Smckusick * vm_map_pageable: 109945748Smckusick * 110045748Smckusick * Sets the pageability of the specified address 110145748Smckusick * range in the target map. Regions specified 110245748Smckusick * as not pageable require locked-down physical 110345748Smckusick * memory and physical page maps. 110445748Smckusick * 110545748Smckusick * The map must not be locked, but a reference 110645748Smckusick * must remain to the map throughout the call. 110745748Smckusick */ 110853357Sbostic int 110945748Smckusick vm_map_pageable(map, start, end, new_pageable) 111045748Smckusick register vm_map_t map; 111145748Smckusick register vm_offset_t start; 111245748Smckusick register vm_offset_t end; 111345748Smckusick register boolean_t new_pageable; 111445748Smckusick { 111545748Smckusick register vm_map_entry_t entry; 111661005Shibler vm_map_entry_t start_entry; 111758596Shibler register vm_offset_t failed; 111858596Shibler int rv; 111945748Smckusick 112045748Smckusick vm_map_lock(map); 112145748Smckusick 112245748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 112345748Smckusick 112445748Smckusick /* 112545748Smckusick * Only one pageability change may take place at one 112645748Smckusick * time, since vm_fault assumes it will be called 112745748Smckusick * only once for each wiring/unwiring. Therefore, we 112845748Smckusick * have to make sure we're actually changing the pageability 112945748Smckusick * for the entire region. We do so before making any changes. 113045748Smckusick */ 113145748Smckusick 113261005Shibler if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 113361005Shibler vm_map_unlock(map); 113461005Shibler return(KERN_INVALID_ADDRESS); 113545748Smckusick } 113661005Shibler entry = start_entry; 113745748Smckusick 113845748Smckusick /* 113945748Smckusick * Actions are rather different for wiring and unwiring, 114045748Smckusick * so we have two separate cases. 
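	 * (Unwiring: every entry in the range must already be wired and
	 * the range must contain no holes; each entry's wired_count is
	 * then decremented, and vm_fault_unwire() is called when it
	 * reaches zero.  Wiring: wired_count is incremented and
	 * vm_fault_wire() is called for newly wired entries, in the two
	 * passes described below.)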
114145748Smckusick */ 114245748Smckusick 114345748Smckusick if (new_pageable) { 114445748Smckusick 114561005Shibler vm_map_clip_start(map, entry, start); 114661005Shibler 114745748Smckusick /* 114845748Smckusick * Unwiring. First ensure that the range to be 114961005Shibler * unwired is really wired down and that there 115061005Shibler * are no holes. 115145748Smckusick */ 115245748Smckusick while ((entry != &map->header) && (entry->start < end)) { 115345748Smckusick 115461005Shibler if (entry->wired_count == 0 || 115561005Shibler (entry->end < end && 115661005Shibler (entry->next == &map->header || 115761005Shibler entry->next->start > entry->end))) { 115845748Smckusick vm_map_unlock(map); 115945748Smckusick return(KERN_INVALID_ARGUMENT); 116045748Smckusick } 116145748Smckusick entry = entry->next; 116245748Smckusick } 116345748Smckusick 116445748Smckusick /* 116545748Smckusick * Now decrement the wiring count for each region. 116645748Smckusick * If a region becomes completely unwired, 116745748Smckusick * unwire its physical pages and mappings. 116845748Smckusick */ 116945748Smckusick lock_set_recursive(&map->lock); 117045748Smckusick 117161005Shibler entry = start_entry; 117245748Smckusick while ((entry != &map->header) && (entry->start < end)) { 117345748Smckusick vm_map_clip_end(map, entry, end); 117445748Smckusick 117545748Smckusick entry->wired_count--; 117645748Smckusick if (entry->wired_count == 0) 117745748Smckusick vm_fault_unwire(map, entry->start, entry->end); 117845748Smckusick 117945748Smckusick entry = entry->next; 118045748Smckusick } 118145748Smckusick lock_clear_recursive(&map->lock); 118245748Smckusick } 118345748Smckusick 118445748Smckusick else { 118545748Smckusick /* 118645748Smckusick * Wiring. We must do this in two passes: 118745748Smckusick * 118861005Shibler * 1. Holding the write lock, we create any shadow 118961005Shibler * or zero-fill objects that need to be created. 119061005Shibler * Then we clip each map entry to the region to be 119161005Shibler * wired and increment its wiring count. We 119261005Shibler * create objects before clipping the map entries 119361005Shibler * to avoid object proliferation. 119445748Smckusick * 119545748Smckusick * 2. We downgrade to a read lock, and call 119645748Smckusick * vm_fault_wire to fault in the pages for any 119745748Smckusick * newly wired area (wired_count is 1). 119845748Smckusick * 119945748Smckusick * Downgrading to a read lock for vm_fault_wire avoids 120045748Smckusick * a possible deadlock with another thread that may have 120145748Smckusick * faulted on one of the pages to be wired (it would mark 120245748Smckusick * the page busy, blocking us, then in turn block on the 120345748Smckusick * map lock that we hold). Because of problems in the 120445748Smckusick * recursive lock package, we cannot upgrade to a write 120545748Smckusick * lock in vm_map_lookup. Thus, any actions that require 120645748Smckusick * the write lock must be done beforehand. Because we 120745748Smckusick * keep the read lock on the map, the copy-on-write status 120845748Smckusick * of the entries we modify here cannot change. 120945748Smckusick */ 121045748Smckusick 121145748Smckusick /* 121245748Smckusick * Pass 1. 
121345748Smckusick */ 121445748Smckusick while ((entry != &map->header) && (entry->start < end)) { 121561005Shibler if (entry->wired_count == 0) { 121645748Smckusick 121745748Smckusick /* 121845748Smckusick * Perform actions of vm_map_lookup that need 121945748Smckusick * the write lock on the map: create a shadow 122045748Smckusick * object for a copy-on-write region, or an 122145748Smckusick * object for a zero-fill region. 122245748Smckusick * 122345748Smckusick * We don't have to do this for entries that 122445748Smckusick * point to sharing maps, because we won't hold 122545748Smckusick * the lock on the sharing map. 122645748Smckusick */ 122745748Smckusick if (!entry->is_a_map) { 122845748Smckusick if (entry->needs_copy && 122945748Smckusick ((entry->protection & VM_PROT_WRITE) != 0)) { 123045748Smckusick 123145748Smckusick vm_object_shadow(&entry->object.vm_object, 123245748Smckusick &entry->offset, 123345748Smckusick (vm_size_t)(entry->end 123445748Smckusick - entry->start)); 123545748Smckusick entry->needs_copy = FALSE; 123645748Smckusick } 123748383Skarels else if (entry->object.vm_object == NULL) { 123845748Smckusick entry->object.vm_object = 123945748Smckusick vm_object_allocate((vm_size_t)(entry->end 124045748Smckusick - entry->start)); 124145748Smckusick entry->offset = (vm_offset_t)0; 124245748Smckusick } 124345748Smckusick } 124445748Smckusick } 124561005Shibler vm_map_clip_start(map, entry, start); 124661005Shibler vm_map_clip_end(map, entry, end); 124761005Shibler entry->wired_count++; 124845748Smckusick 124961005Shibler /* 125061005Shibler * Check for holes 125161005Shibler */ 125261005Shibler if (entry->end < end && 125361005Shibler (entry->next == &map->header || 125461005Shibler entry->next->start > entry->end)) { 125561005Shibler /* 125661005Shibler * Found one. Object creation actions 125761005Shibler * do not need to be undone, but the 125861005Shibler * wired counts need to be restored. 125961005Shibler */ 126061005Shibler while (entry != &map->header && entry->end > start) { 126161005Shibler entry->wired_count--; 126261005Shibler entry = entry->prev; 126361005Shibler } 126461005Shibler vm_map_unlock(map); 126561005Shibler return(KERN_INVALID_ARGUMENT); 126661005Shibler } 126745748Smckusick entry = entry->next; 126845748Smckusick } 126945748Smckusick 127045748Smckusick /* 127145748Smckusick * Pass 2. 127245748Smckusick */ 127345748Smckusick 127445748Smckusick /* 127545748Smckusick * HACK HACK HACK HACK 127645748Smckusick * 127745748Smckusick * If we are wiring in the kernel map or a submap of it, 127845748Smckusick * unlock the map to avoid deadlocks. We trust that the 127945748Smckusick * kernel threads are well-behaved, and therefore will 128045748Smckusick * not do anything destructive to this region of the map 128145748Smckusick * while we have it unlocked. We cannot trust user threads 128245748Smckusick * to do the same. 128345748Smckusick * 128445748Smckusick * HACK HACK HACK HACK 128545748Smckusick */ 128645748Smckusick if (vm_map_pmap(map) == kernel_pmap) { 128745748Smckusick vm_map_unlock(map); /* trust me ... 
*/ 128845748Smckusick } 128945748Smckusick else { 129045748Smckusick lock_set_recursive(&map->lock); 129145748Smckusick lock_write_to_read(&map->lock); 129245748Smckusick } 129345748Smckusick 129458596Shibler rv = 0; 129561005Shibler entry = start_entry; 129645748Smckusick while (entry != &map->header && entry->start < end) { 129758596Shibler /* 129858596Shibler * If vm_fault_wire fails for any page we need to 129958596Shibler * undo what has been done. We decrement the wiring 130058596Shibler * count for those pages which have not yet been 130158596Shibler * wired (now) and unwire those that have (later). 130258596Shibler * 130358596Shibler * XXX this violates the locking protocol on the map, 130458596Shibler * needs to be fixed. 130558596Shibler */ 130658596Shibler if (rv) 130758596Shibler entry->wired_count--; 130858596Shibler else if (entry->wired_count == 1) { 130958596Shibler rv = vm_fault_wire(map, entry->start, entry->end); 131058596Shibler if (rv) { 131158596Shibler failed = entry->start; 131258596Shibler entry->wired_count--; 131358596Shibler } 131445748Smckusick } 131545748Smckusick entry = entry->next; 131645748Smckusick } 131745748Smckusick 131845748Smckusick if (vm_map_pmap(map) == kernel_pmap) { 131945748Smckusick vm_map_lock(map); 132045748Smckusick } 132145748Smckusick else { 132245748Smckusick lock_clear_recursive(&map->lock); 132345748Smckusick } 132458596Shibler if (rv) { 132558596Shibler vm_map_unlock(map); 132658596Shibler (void) vm_map_pageable(map, start, failed, TRUE); 132758596Shibler return(rv); 132858596Shibler } 132945748Smckusick } 133045748Smckusick 133145748Smckusick vm_map_unlock(map); 133245748Smckusick 133345748Smckusick return(KERN_SUCCESS); 133445748Smckusick } 133545748Smckusick 133645748Smckusick /* 133765686Shibler * vm_map_clean 133865686Shibler * 133965686Shibler * Push any dirty cached pages in the address range to their pager. 134065686Shibler * If syncio is TRUE, dirty pages are written synchronously. 134165686Shibler * If invalidate is TRUE, any cached pages are freed as well. 134265686Shibler * 134365686Shibler * Returns an error if any part of the specified range is not mapped. 134465686Shibler */ 134565686Shibler int 134665686Shibler vm_map_clean(map, start, end, syncio, invalidate) 134765686Shibler vm_map_t map; 134865686Shibler vm_offset_t start; 134965686Shibler vm_offset_t end; 135065686Shibler boolean_t syncio; 135165686Shibler boolean_t invalidate; 135265686Shibler { 135365686Shibler register vm_map_entry_t current; 135465686Shibler vm_map_entry_t entry; 135565686Shibler vm_size_t size; 135665686Shibler vm_object_t object; 135765686Shibler vm_offset_t offset; 135865686Shibler 135965686Shibler vm_map_lock_read(map); 136065686Shibler VM_MAP_RANGE_CHECK(map, start, end); 136165686Shibler if (!vm_map_lookup_entry(map, start, &entry)) { 136265686Shibler vm_map_unlock_read(map); 136365686Shibler return(KERN_INVALID_ADDRESS); 136465686Shibler } 136565686Shibler 136665686Shibler /* 136765686Shibler * Make a first pass to check for holes. 
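	 * (A "hole" here is either a gap between successive entries within
	 * [start, end) or a range that runs past the last entry; submap
	 * entries are rejected as well.)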
136865686Shibler */ 136965686Shibler for (current = entry; current->start < end; current = current->next) { 137065686Shibler if (current->is_sub_map) { 137165686Shibler vm_map_unlock_read(map); 137265686Shibler return(KERN_INVALID_ARGUMENT); 137365686Shibler } 137465686Shibler if (end > current->end && 137565686Shibler (current->next == &map->header || 137665686Shibler current->end != current->next->start)) { 137765686Shibler vm_map_unlock_read(map); 137865686Shibler return(KERN_INVALID_ADDRESS); 137965686Shibler } 138065686Shibler } 138165686Shibler 138265686Shibler /* 138365686Shibler * Make a second pass, cleaning/uncaching pages from the indicated 138465686Shibler * objects as we go. 138565686Shibler */ 138665686Shibler for (current = entry; current->start < end; current = current->next) { 138765686Shibler offset = current->offset + (start - current->start); 138865686Shibler size = (end <= current->end ? end : current->end) - start; 138965686Shibler if (current->is_a_map) { 139065686Shibler register vm_map_t smap; 139165686Shibler vm_map_entry_t tentry; 139265686Shibler vm_size_t tsize; 139365686Shibler 139465686Shibler smap = current->object.share_map; 139565686Shibler vm_map_lock_read(smap); 139665686Shibler (void) vm_map_lookup_entry(smap, offset, &tentry); 139765686Shibler tsize = tentry->end - offset; 139865686Shibler if (tsize < size) 139965686Shibler size = tsize; 140065686Shibler object = tentry->object.vm_object; 140165686Shibler offset = tentry->offset + (offset - tentry->start); 140265686Shibler vm_object_lock(object); 140365686Shibler vm_map_unlock_read(smap); 140465686Shibler } else { 140565686Shibler object = current->object.vm_object; 140665686Shibler vm_object_lock(object); 140765686Shibler } 140865686Shibler /* 140965686Shibler * Flush pages if writing is allowed. 141065686Shibler * XXX should we continue on an error? 141165686Shibler */ 141265686Shibler if ((current->protection & VM_PROT_WRITE) && 141365686Shibler !vm_object_page_clean(object, offset, offset+size, 141465686Shibler syncio, FALSE)) { 141565686Shibler vm_object_unlock(object); 141665686Shibler vm_map_unlock_read(map); 141765686Shibler return(KERN_FAILURE); 141865686Shibler } 141965686Shibler if (invalidate) 142065686Shibler vm_object_page_remove(object, offset, offset+size); 142165686Shibler vm_object_unlock(object); 142265686Shibler start += size; 142365686Shibler } 142465686Shibler 142565686Shibler vm_map_unlock_read(map); 142665686Shibler return(KERN_SUCCESS); 142765686Shibler } 142865686Shibler 142965686Shibler /* 143045748Smckusick * vm_map_entry_unwire: [ internal use only ] 143145748Smckusick * 143245748Smckusick * Make the region specified by this entry pageable. 143345748Smckusick * 143445748Smckusick * The map in question should be locked. 143545748Smckusick * [This is the reason for this routine's existence.] 143645748Smckusick */ 1437*68162Scgd void 1438*68162Scgd vm_map_entry_unwire(map, entry) 143945748Smckusick vm_map_t map; 144045748Smckusick register vm_map_entry_t entry; 144145748Smckusick { 144245748Smckusick vm_fault_unwire(map, entry->start, entry->end); 144345748Smckusick entry->wired_count = 0; 144445748Smckusick } 144545748Smckusick 144645748Smckusick /* 144745748Smckusick * vm_map_entry_delete: [ internal use only ] 144845748Smckusick * 144945748Smckusick * Deallocate the given entry from the target map. 
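 *
 *	The entry is unwired if necessary, unlinked from the map, its
 *	object (or share/sub map) reference is released, and the entry
 *	itself is returned to its allocator.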
145045748Smckusick */ 1451*68162Scgd void 1452*68162Scgd vm_map_entry_delete(map, entry) 145345748Smckusick register vm_map_t map; 145445748Smckusick register vm_map_entry_t entry; 145545748Smckusick { 145645748Smckusick if (entry->wired_count != 0) 145745748Smckusick vm_map_entry_unwire(map, entry); 145845748Smckusick 145945748Smckusick vm_map_entry_unlink(map, entry); 146045748Smckusick map->size -= entry->end - entry->start; 146145748Smckusick 146245748Smckusick if (entry->is_a_map || entry->is_sub_map) 146345748Smckusick vm_map_deallocate(entry->object.share_map); 146445748Smckusick else 146545748Smckusick vm_object_deallocate(entry->object.vm_object); 146645748Smckusick 146745748Smckusick vm_map_entry_dispose(map, entry); 146845748Smckusick } 146945748Smckusick 147045748Smckusick /* 147145748Smckusick * vm_map_delete: [ internal use only ] 147245748Smckusick * 147345748Smckusick * Deallocates the given address range from the target 147445748Smckusick * map. 147545748Smckusick * 147645748Smckusick * When called with a sharing map, removes pages from 147745748Smckusick * that region from all physical maps. 147845748Smckusick */ 147953357Sbostic int 148045748Smckusick vm_map_delete(map, start, end) 148145748Smckusick register vm_map_t map; 148245748Smckusick vm_offset_t start; 148345748Smckusick register vm_offset_t end; 148445748Smckusick { 148545748Smckusick register vm_map_entry_t entry; 148645748Smckusick vm_map_entry_t first_entry; 148745748Smckusick 148845748Smckusick /* 148945748Smckusick * Find the start of the region, and clip it 149045748Smckusick */ 149145748Smckusick 149245748Smckusick if (!vm_map_lookup_entry(map, start, &first_entry)) 149345748Smckusick entry = first_entry->next; 149445748Smckusick else { 149545748Smckusick entry = first_entry; 149645748Smckusick vm_map_clip_start(map, entry, start); 149745748Smckusick 149845748Smckusick /* 149945748Smckusick * Fix the lookup hint now, rather than each 150045748Smckusick * time through the loop. 150145748Smckusick */ 150245748Smckusick 150345748Smckusick SAVE_HINT(map, entry->prev); 150445748Smckusick } 150545748Smckusick 150645748Smckusick /* 150745748Smckusick * Save the free space hint 150845748Smckusick */ 150945748Smckusick 151045748Smckusick if (map->first_free->start >= start) 151145748Smckusick map->first_free = entry->prev; 151245748Smckusick 151345748Smckusick /* 151445748Smckusick * Step through all entries in this region 151545748Smckusick */ 151645748Smckusick 151745748Smckusick while ((entry != &map->header) && (entry->start < end)) { 151845748Smckusick vm_map_entry_t next; 151945748Smckusick register vm_offset_t s, e; 152045748Smckusick register vm_object_t object; 152145748Smckusick 152245748Smckusick vm_map_clip_end(map, entry, end); 152345748Smckusick 152445748Smckusick next = entry->next; 152545748Smckusick s = entry->start; 152645748Smckusick e = entry->end; 152745748Smckusick 152845748Smckusick /* 152945748Smckusick * Unwire before removing addresses from the pmap; 153045748Smckusick * otherwise, unwiring will put the entries back in 153145748Smckusick * the pmap.
153245748Smckusick */ 153345748Smckusick 153445748Smckusick object = entry->object.vm_object; 153545748Smckusick if (entry->wired_count != 0) 153645748Smckusick vm_map_entry_unwire(map, entry); 153745748Smckusick 153845748Smckusick /* 153945748Smckusick * If this is a sharing map, we must remove 154045748Smckusick * *all* references to this data, since we can't 154145748Smckusick * find all of the physical maps which are sharing 154245748Smckusick * it. 154345748Smckusick */ 154445748Smckusick 154545748Smckusick if (object == kernel_object || object == kmem_object) 154645748Smckusick vm_object_page_remove(object, entry->offset, 154745748Smckusick entry->offset + (e - s)); 154845748Smckusick else if (!map->is_main_map) 154945748Smckusick vm_object_pmap_remove(object, 155045748Smckusick entry->offset, 155145748Smckusick entry->offset + (e - s)); 155245748Smckusick else 155345748Smckusick pmap_remove(map->pmap, s, e); 155445748Smckusick 155545748Smckusick /* 155645748Smckusick * Delete the entry (which may delete the object) 155745748Smckusick * only after removing all pmap entries pointing 155845748Smckusick * to its pages. (Otherwise, its page frames may 155945748Smckusick * be reallocated, and any modify bits will be 156045748Smckusick * set in the wrong object!) 156145748Smckusick */ 156245748Smckusick 156345748Smckusick vm_map_entry_delete(map, entry); 156445748Smckusick entry = next; 156545748Smckusick } 156645748Smckusick return(KERN_SUCCESS); 156745748Smckusick } 156845748Smckusick 156945748Smckusick /* 157045748Smckusick * vm_map_remove: 157145748Smckusick * 157245748Smckusick * Remove the given address range from the target map. 157345748Smckusick * This is the exported form of vm_map_delete. 157445748Smckusick */ 157553357Sbostic int 157645748Smckusick vm_map_remove(map, start, end) 157745748Smckusick register vm_map_t map; 157845748Smckusick register vm_offset_t start; 157945748Smckusick register vm_offset_t end; 158045748Smckusick { 158145748Smckusick register int result; 158245748Smckusick 158345748Smckusick vm_map_lock(map); 158445748Smckusick VM_MAP_RANGE_CHECK(map, start, end); 158545748Smckusick result = vm_map_delete(map, start, end); 158645748Smckusick vm_map_unlock(map); 158745748Smckusick 158845748Smckusick return(result); 158945748Smckusick } 159045748Smckusick 159145748Smckusick /* 159245748Smckusick * vm_map_check_protection: 159345748Smckusick * 159445748Smckusick * Assert that the target map allows the specified 159545748Smckusick * privilege on the entire address region given. 159645748Smckusick * The entire region must be allocated. 159745748Smckusick */ 1598*68162Scgd boolean_t 1599*68162Scgd vm_map_check_protection(map, start, end, protection) 160045748Smckusick register vm_map_t map; 160145748Smckusick register vm_offset_t start; 160245748Smckusick register vm_offset_t end; 160345748Smckusick register vm_prot_t protection; 160445748Smckusick { 160545748Smckusick register vm_map_entry_t entry; 160645748Smckusick vm_map_entry_t tmp_entry; 160745748Smckusick 160845748Smckusick if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 160945748Smckusick return(FALSE); 161045748Smckusick } 161145748Smckusick 161245748Smckusick entry = tmp_entry; 161345748Smckusick 161445748Smckusick while (start < end) { 161545748Smckusick if (entry == &map->header) { 161645748Smckusick return(FALSE); 161745748Smckusick } 161845748Smckusick 161945748Smckusick /* 162045748Smckusick * No holes allowed! 
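 * (If `start' has not yet reached this entry's start address,
 * the intervening range is unallocated and the check fails.)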
162145748Smckusick */ 162245748Smckusick 162345748Smckusick if (start < entry->start) { 162445748Smckusick return(FALSE); 162545748Smckusick } 162645748Smckusick 162745748Smckusick /* 162845748Smckusick * Check protection associated with entry. 162945748Smckusick */ 163045748Smckusick 163145748Smckusick if ((entry->protection & protection) != protection) { 163245748Smckusick return(FALSE); 163345748Smckusick } 163445748Smckusick 163545748Smckusick /* go to next entry */ 163645748Smckusick 163745748Smckusick start = entry->end; 163845748Smckusick entry = entry->next; 163945748Smckusick } 164045748Smckusick return(TRUE); 164145748Smckusick } 164245748Smckusick 164345748Smckusick /* 164445748Smckusick * vm_map_copy_entry: 164545748Smckusick * 164645748Smckusick * Copies the contents of the source entry to the destination 164745748Smckusick * entry. The entries *must* be aligned properly. 164845748Smckusick */ 1649*68162Scgd void 1650*68162Scgd vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry) 165145748Smckusick vm_map_t src_map, dst_map; 165245748Smckusick register vm_map_entry_t src_entry, dst_entry; 165345748Smckusick { 165445748Smckusick vm_object_t temp_object; 165545748Smckusick 165645748Smckusick if (src_entry->is_sub_map || dst_entry->is_sub_map) 165745748Smckusick return; 165845748Smckusick 165948383Skarels if (dst_entry->object.vm_object != NULL && 166050919Smckusick (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0) 166145748Smckusick printf("vm_map_copy_entry: copying over permanent data!\n"); 166245748Smckusick 166345748Smckusick /* 166445748Smckusick * If our destination map was wired down, 166545748Smckusick * unwire it now. 166645748Smckusick */ 166745748Smckusick 166845748Smckusick if (dst_entry->wired_count != 0) 166945748Smckusick vm_map_entry_unwire(dst_map, dst_entry); 167045748Smckusick 167145748Smckusick /* 167245748Smckusick * If we're dealing with a sharing map, we 167345748Smckusick * must remove the destination pages from 167445748Smckusick * all maps (since we cannot know which maps 167545748Smckusick * this sharing map belongs in). 167645748Smckusick */ 167745748Smckusick 167845748Smckusick if (dst_map->is_main_map) 167945748Smckusick pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end); 168045748Smckusick else 168145748Smckusick vm_object_pmap_remove(dst_entry->object.vm_object, 168245748Smckusick dst_entry->offset, 168345748Smckusick dst_entry->offset + 168445748Smckusick (dst_entry->end - dst_entry->start)); 168545748Smckusick 168645748Smckusick if (src_entry->wired_count == 0) { 168745748Smckusick 168845748Smckusick boolean_t src_needs_copy; 168945748Smckusick 169045748Smckusick /* 169145748Smckusick * If the source entry is marked needs_copy, 169245748Smckusick * it is already write-protected. 169345748Smckusick */ 169445748Smckusick if (!src_entry->needs_copy) { 169545748Smckusick 169645748Smckusick boolean_t su; 169745748Smckusick 169845748Smckusick /* 169945748Smckusick * If the source entry has only one mapping, 170045748Smckusick * we can just protect the virtual address 170145748Smckusick * range. 
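 * Otherwise the object may be mapped by pmaps we cannot
 * enumerate, so vm_object_pmap_copy() below write-protects
 * the pages at the object level instead.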
170245748Smckusick */ 170345748Smckusick if (!(su = src_map->is_main_map)) { 170445748Smckusick simple_lock(&src_map->ref_lock); 170545748Smckusick su = (src_map->ref_count == 1); 170645748Smckusick simple_unlock(&src_map->ref_lock); 170745748Smckusick } 170845748Smckusick 170945748Smckusick if (su) { 171045748Smckusick pmap_protect(src_map->pmap, 171145748Smckusick src_entry->start, 171245748Smckusick src_entry->end, 171345748Smckusick src_entry->protection & ~VM_PROT_WRITE); 171445748Smckusick } 171545748Smckusick else { 171645748Smckusick vm_object_pmap_copy(src_entry->object.vm_object, 171745748Smckusick src_entry->offset, 171845748Smckusick src_entry->offset + (src_entry->end 171945748Smckusick -src_entry->start)); 172045748Smckusick } 172145748Smckusick } 172245748Smckusick 172345748Smckusick /* 172445748Smckusick * Make a copy of the object. 172545748Smckusick */ 172645748Smckusick temp_object = dst_entry->object.vm_object; 172745748Smckusick vm_object_copy(src_entry->object.vm_object, 172845748Smckusick src_entry->offset, 172945748Smckusick (vm_size_t)(src_entry->end - 173045748Smckusick src_entry->start), 173145748Smckusick &dst_entry->object.vm_object, 173245748Smckusick &dst_entry->offset, 173345748Smckusick &src_needs_copy); 173445748Smckusick /* 173545748Smckusick * If we didn't get a copy-object now, mark the 173645748Smckusick * source map entry so that a shadow will be created 173745748Smckusick * to hold its changed pages. 173845748Smckusick */ 173945748Smckusick if (src_needs_copy) 174045748Smckusick src_entry->needs_copy = TRUE; 174145748Smckusick 174245748Smckusick /* 174345748Smckusick * The destination always needs to have a shadow 174445748Smckusick * created. 174545748Smckusick */ 174645748Smckusick dst_entry->needs_copy = TRUE; 174745748Smckusick 174845748Smckusick /* 174945748Smckusick * Mark the entries copy-on-write, so that write-enabling 175045748Smckusick * the entry won't make copy-on-write pages writable. 175145748Smckusick */ 175245748Smckusick src_entry->copy_on_write = TRUE; 175345748Smckusick dst_entry->copy_on_write = TRUE; 175445748Smckusick /* 175545748Smckusick * Get rid of the old object. 175645748Smckusick */ 175745748Smckusick vm_object_deallocate(temp_object); 175845748Smckusick 175945748Smckusick pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 176045748Smckusick dst_entry->end - dst_entry->start, src_entry->start); 176145748Smckusick } 176245748Smckusick else { 176345748Smckusick /* 176445748Smckusick * Of course, wired down pages can't be set copy-on-write. 176545748Smckusick * Cause wired pages to be copied into the new 176645748Smckusick * map by simulating faults (the new pages are 176745748Smckusick * pageable) 176845748Smckusick */ 176945748Smckusick vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 177045748Smckusick } 177145748Smckusick } 177245748Smckusick 177345748Smckusick /* 177445748Smckusick * vm_map_copy: 177545748Smckusick * 177645748Smckusick * Perform a virtual memory copy from the source 177745748Smckusick * address map/range to the destination map/range. 177845748Smckusick * 177945748Smckusick * If src_destroy or dst_alloc is requested, 178045748Smckusick * the source and destination regions should be 178145748Smckusick * disjoint, not only in the top-level map, but 178245748Smckusick * in the sharing maps as well. [The best way 178345748Smckusick * to guarantee this is to use a new intermediate 178445748Smckusick * map to make copies. This also reduces map 178545748Smckusick * fragmentation.] 
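 *
 *	An illustrative call (a sketch, not taken from this file):
 *
 *		rv = vm_map_copy(dst_map, src_map, dst_addr, len,
 *				 src_addr, FALSE, FALSE);
 *
 *	copies len bytes from src_addr in src_map to dst_addr in
 *	dst_map without allocating the destination range and
 *	without destroying the source.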
178645748Smckusick */ 178753357Sbostic int 178845748Smckusick vm_map_copy(dst_map, src_map, 178945748Smckusick dst_addr, len, src_addr, 179045748Smckusick dst_alloc, src_destroy) 179145748Smckusick vm_map_t dst_map; 179245748Smckusick vm_map_t src_map; 179345748Smckusick vm_offset_t dst_addr; 179445748Smckusick vm_size_t len; 179545748Smckusick vm_offset_t src_addr; 179645748Smckusick boolean_t dst_alloc; 179745748Smckusick boolean_t src_destroy; 179845748Smckusick { 179945748Smckusick register 180045748Smckusick vm_map_entry_t src_entry; 180145748Smckusick register 180245748Smckusick vm_map_entry_t dst_entry; 180345748Smckusick vm_map_entry_t tmp_entry; 180445748Smckusick vm_offset_t src_start; 180545748Smckusick vm_offset_t src_end; 180645748Smckusick vm_offset_t dst_start; 180745748Smckusick vm_offset_t dst_end; 180845748Smckusick vm_offset_t src_clip; 180945748Smckusick vm_offset_t dst_clip; 181045748Smckusick int result; 181145748Smckusick boolean_t old_src_destroy; 181245748Smckusick 181345748Smckusick /* 181445748Smckusick * XXX While we figure out why src_destroy screws up, 181545748Smckusick * we'll do it by explicitly vm_map_delete'ing at the end. 181645748Smckusick */ 181745748Smckusick 181845748Smckusick old_src_destroy = src_destroy; 181945748Smckusick src_destroy = FALSE; 182045748Smckusick 182145748Smckusick /* 182245748Smckusick * Compute start and end of region in both maps 182345748Smckusick */ 182445748Smckusick 182545748Smckusick src_start = src_addr; 182645748Smckusick src_end = src_start + len; 182745748Smckusick dst_start = dst_addr; 182845748Smckusick dst_end = dst_start + len; 182945748Smckusick 183045748Smckusick /* 183145748Smckusick * Check that the region can exist in both source 183245748Smckusick * and destination. 183345748Smckusick */ 183445748Smckusick 183545748Smckusick if ((dst_end < dst_start) || (src_end < src_start)) 183645748Smckusick return(KERN_NO_SPACE); 183745748Smckusick 183845748Smckusick /* 183945748Smckusick * Lock the maps in question -- we avoid deadlock 184045748Smckusick * by ordering lock acquisition by map value 184145748Smckusick */ 184245748Smckusick 184345748Smckusick if (src_map == dst_map) { 184445748Smckusick vm_map_lock(src_map); 184545748Smckusick } 1846*68162Scgd else if ((long) src_map < (long) dst_map) { 184745748Smckusick vm_map_lock(src_map); 184845748Smckusick vm_map_lock(dst_map); 184945748Smckusick } else { 185045748Smckusick vm_map_lock(dst_map); 185145748Smckusick vm_map_lock(src_map); 185245748Smckusick } 185345748Smckusick 185445748Smckusick result = KERN_SUCCESS; 185545748Smckusick 185645748Smckusick /* 185745748Smckusick * Check protections... source must be completely readable and 185845748Smckusick * destination must be completely writable. [Note that if we're 185945748Smckusick * allocating the destination region, we don't have to worry 186045748Smckusick * about protection, but instead about whether the region 186145748Smckusick * exists.] 
186245748Smckusick */ 186345748Smckusick 186445748Smckusick if (src_map->is_main_map && dst_map->is_main_map) { 186545748Smckusick if (!vm_map_check_protection(src_map, src_start, src_end, 186645748Smckusick VM_PROT_READ)) { 186745748Smckusick result = KERN_PROTECTION_FAILURE; 186845748Smckusick goto Return; 186945748Smckusick } 187045748Smckusick 187145748Smckusick if (dst_alloc) { 187245748Smckusick /* XXX Consider making this a vm_map_find instead */ 187348383Skarels if ((result = vm_map_insert(dst_map, NULL, 187445748Smckusick (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS) 187545748Smckusick goto Return; 187645748Smckusick } 187745748Smckusick else if (!vm_map_check_protection(dst_map, dst_start, dst_end, 187845748Smckusick VM_PROT_WRITE)) { 187945748Smckusick result = KERN_PROTECTION_FAILURE; 188045748Smckusick goto Return; 188145748Smckusick } 188245748Smckusick } 188345748Smckusick 188445748Smckusick /* 188545748Smckusick * Find the start entries and clip. 188645748Smckusick * 188745748Smckusick * Note that checking protection asserts that the 188845748Smckusick * lookup cannot fail. 188945748Smckusick * 189045748Smckusick * Also note that we wait to do the second lookup 189145748Smckusick * until we have done the first clip, as the clip 189245748Smckusick * may affect which entry we get! 189345748Smckusick */ 189445748Smckusick 189545748Smckusick (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); 189645748Smckusick src_entry = tmp_entry; 189745748Smckusick vm_map_clip_start(src_map, src_entry, src_start); 189845748Smckusick 189945748Smckusick (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry); 190045748Smckusick dst_entry = tmp_entry; 190145748Smckusick vm_map_clip_start(dst_map, dst_entry, dst_start); 190245748Smckusick 190345748Smckusick /* 190445748Smckusick * If both source and destination entries are the same, 190545748Smckusick * retry the first lookup, as it may have changed. 190645748Smckusick */ 190745748Smckusick 190845748Smckusick if (src_entry == dst_entry) { 190945748Smckusick (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); 191045748Smckusick src_entry = tmp_entry; 191145748Smckusick } 191245748Smckusick 191345748Smckusick /* 191445748Smckusick * If source and destination entries are still the same, 191545748Smckusick * a null copy is being performed. 191645748Smckusick */ 191745748Smckusick 191845748Smckusick if (src_entry == dst_entry) 191945748Smckusick goto Return; 192045748Smckusick 192145748Smckusick /* 192245748Smckusick * Go through entries until we get to the end of the 192345748Smckusick * region. 192445748Smckusick */ 192545748Smckusick 192645748Smckusick while (src_start < src_end) { 192745748Smckusick /* 192845748Smckusick * Clip the entries to the endpoint of the entire region. 192945748Smckusick */ 193045748Smckusick 193145748Smckusick vm_map_clip_end(src_map, src_entry, src_end); 193245748Smckusick vm_map_clip_end(dst_map, dst_entry, dst_end); 193345748Smckusick 193445748Smckusick /* 193545748Smckusick * Clip each entry to the endpoint of the other entry. 
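 * For example, if the source entry covers three pages and the
 * destination entry only two, the source entry is clipped to
 * two pages here; the remaining page is copied on a later
 * pass through the loop.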
193645748Smckusick */ 193745748Smckusick 193845748Smckusick src_clip = src_entry->start + (dst_entry->end - dst_entry->start); 193945748Smckusick vm_map_clip_end(src_map, src_entry, src_clip); 194045748Smckusick 194145748Smckusick dst_clip = dst_entry->start + (src_entry->end - src_entry->start); 194245748Smckusick vm_map_clip_end(dst_map, dst_entry, dst_clip); 194345748Smckusick 194445748Smckusick /* 194545748Smckusick * Both entries now match in size and relative endpoints. 194645748Smckusick * 194745748Smckusick * If both entries refer to a VM object, we can 194845748Smckusick * deal with them now. 194945748Smckusick */ 195045748Smckusick 195145748Smckusick if (!src_entry->is_a_map && !dst_entry->is_a_map) { 195245748Smckusick vm_map_copy_entry(src_map, dst_map, src_entry, 195345748Smckusick dst_entry); 195445748Smckusick } 195545748Smckusick else { 195645748Smckusick register vm_map_t new_dst_map; 195745748Smckusick vm_offset_t new_dst_start; 195845748Smckusick vm_size_t new_size; 195945748Smckusick vm_map_t new_src_map; 196045748Smckusick vm_offset_t new_src_start; 196145748Smckusick 196245748Smckusick /* 196345748Smckusick * We have to follow at least one sharing map. 196445748Smckusick */ 196545748Smckusick 196645748Smckusick new_size = (dst_entry->end - dst_entry->start); 196745748Smckusick 196845748Smckusick if (src_entry->is_a_map) { 196945748Smckusick new_src_map = src_entry->object.share_map; 197045748Smckusick new_src_start = src_entry->offset; 197145748Smckusick } 197245748Smckusick else { 197345748Smckusick new_src_map = src_map; 197445748Smckusick new_src_start = src_entry->start; 197545748Smckusick lock_set_recursive(&src_map->lock); 197645748Smckusick } 197745748Smckusick 197845748Smckusick if (dst_entry->is_a_map) { 197945748Smckusick vm_offset_t new_dst_end; 198045748Smckusick 198145748Smckusick new_dst_map = dst_entry->object.share_map; 198245748Smckusick new_dst_start = dst_entry->offset; 198345748Smckusick 198445748Smckusick /* 198545748Smckusick * Since the destination sharing entries 198645748Smckusick * will be merely deallocated, we can 198745748Smckusick * do that now, and replace the region 198845748Smckusick * with a null object. [This prevents 198945748Smckusick * splitting the source map to match 199045748Smckusick * the form of the destination map.] 199145748Smckusick * Note that we can only do so if the 199245748Smckusick * source and destination do not overlap. 199345748Smckusick */ 199445748Smckusick 199545748Smckusick new_dst_end = new_dst_start + new_size; 199645748Smckusick 199745748Smckusick if (new_dst_map != new_src_map) { 199845748Smckusick vm_map_lock(new_dst_map); 199945748Smckusick (void) vm_map_delete(new_dst_map, 200045748Smckusick new_dst_start, 200145748Smckusick new_dst_end); 200245748Smckusick (void) vm_map_insert(new_dst_map, 200348383Skarels NULL, 200445748Smckusick (vm_offset_t) 0, 200545748Smckusick new_dst_start, 200645748Smckusick new_dst_end); 200745748Smckusick vm_map_unlock(new_dst_map); 200845748Smckusick } 200945748Smckusick } 201045748Smckusick else { 201145748Smckusick new_dst_map = dst_map; 201245748Smckusick new_dst_start = dst_entry->start; 201345748Smckusick lock_set_recursive(&dst_map->lock); 201445748Smckusick } 201545748Smckusick 201645748Smckusick /* 201745748Smckusick * Recursively copy the sharing map. 
201845748Smckusick */ 201945748Smckusick 202045748Smckusick (void) vm_map_copy(new_dst_map, new_src_map, 202145748Smckusick new_dst_start, new_size, new_src_start, 202245748Smckusick FALSE, FALSE); 202345748Smckusick 202445748Smckusick if (dst_map == new_dst_map) 202545748Smckusick lock_clear_recursive(&dst_map->lock); 202645748Smckusick if (src_map == new_src_map) 202745748Smckusick lock_clear_recursive(&src_map->lock); 202845748Smckusick } 202945748Smckusick 203045748Smckusick /* 203145748Smckusick * Update variables for next pass through the loop. 203245748Smckusick */ 203345748Smckusick 203445748Smckusick src_start = src_entry->end; 203545748Smckusick src_entry = src_entry->next; 203645748Smckusick dst_start = dst_entry->end; 203745748Smckusick dst_entry = dst_entry->next; 203845748Smckusick 203945748Smckusick /* 204045748Smckusick * If the source is to be destroyed, here is the 204145748Smckusick * place to do it. 204245748Smckusick */ 204345748Smckusick 204445748Smckusick if (src_destroy && src_map->is_main_map && 204545748Smckusick dst_map->is_main_map) 204645748Smckusick vm_map_entry_delete(src_map, src_entry->prev); 204745748Smckusick } 204845748Smckusick 204945748Smckusick /* 205045748Smckusick * Update the physical maps as appropriate 205145748Smckusick */ 205245748Smckusick 205345748Smckusick if (src_map->is_main_map && dst_map->is_main_map) { 205445748Smckusick if (src_destroy) 205545748Smckusick pmap_remove(src_map->pmap, src_addr, src_addr + len); 205645748Smckusick } 205745748Smckusick 205845748Smckusick /* 205945748Smckusick * Unlock the maps 206045748Smckusick */ 206145748Smckusick 206245748Smckusick Return: ; 206345748Smckusick 206445748Smckusick if (old_src_destroy) 206545748Smckusick vm_map_delete(src_map, src_addr, src_addr + len); 206645748Smckusick 206745748Smckusick vm_map_unlock(src_map); 206845748Smckusick if (src_map != dst_map) 206945748Smckusick vm_map_unlock(dst_map); 207045748Smckusick 207145748Smckusick return(result); 207245748Smckusick } 207345748Smckusick 207445748Smckusick /* 207548383Skarels * vmspace_fork: 207648383Skarels * Create a new process vmspace structure and vm_map 207748383Skarels * based on those of an existing process. The new map 207848383Skarels * is based on the old map, according to the inheritance 207948383Skarels * values on the regions in that map. 208045748Smckusick * 208148383Skarels * The source map must not be locked. 
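 *
 *	Each old map entry is handled according to its inheritance
 *	value: VM_INHERIT_NONE entries are omitted from the child,
 *	VM_INHERIT_SHARE entries are converted to reference a sharing
 *	map (created if necessary) that both address maps then share,
 *	and VM_INHERIT_COPY entries are copied (copy-on-write where
 *	the pages are not wired).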
208245748Smckusick */ 208348383Skarels struct vmspace * 208448383Skarels vmspace_fork(vm1) 208548383Skarels register struct vmspace *vm1; 208645748Smckusick { 208748383Skarels register struct vmspace *vm2; 208848383Skarels vm_map_t old_map = &vm1->vm_map; 208945748Smckusick vm_map_t new_map; 209045748Smckusick vm_map_entry_t old_entry; 209145748Smckusick vm_map_entry_t new_entry; 209245748Smckusick pmap_t new_pmap; 209345748Smckusick 209445748Smckusick vm_map_lock(old_map); 209545748Smckusick 209648383Skarels vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, 209748383Skarels old_map->entries_pageable); 209848383Skarels bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 209948383Skarels (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); 210048383Skarels new_pmap = &vm2->vm_pmap; /* XXX */ 210148383Skarels new_map = &vm2->vm_map; /* XXX */ 210245748Smckusick 210345748Smckusick old_entry = old_map->header.next; 210445748Smckusick 210545748Smckusick while (old_entry != &old_map->header) { 210645748Smckusick if (old_entry->is_sub_map) 210745748Smckusick panic("vm_map_fork: encountered a submap"); 210845748Smckusick 210945748Smckusick switch (old_entry->inheritance) { 211045748Smckusick case VM_INHERIT_NONE: 211145748Smckusick break; 211245748Smckusick 211345748Smckusick case VM_INHERIT_SHARE: 211445748Smckusick /* 211545748Smckusick * If we don't already have a sharing map: 211645748Smckusick */ 211745748Smckusick 211845748Smckusick if (!old_entry->is_a_map) { 211945748Smckusick vm_map_t new_share_map; 212045748Smckusick vm_map_entry_t new_share_entry; 212145748Smckusick 212245748Smckusick /* 212345748Smckusick * Create a new sharing map 212445748Smckusick */ 212545748Smckusick 212648383Skarels new_share_map = vm_map_create(NULL, 212745748Smckusick old_entry->start, 212845748Smckusick old_entry->end, 212945748Smckusick TRUE); 213045748Smckusick new_share_map->is_main_map = FALSE; 213145748Smckusick 213245748Smckusick /* 213345748Smckusick * Create the only sharing entry from the 213445748Smckusick * old task map entry. 213545748Smckusick */ 213645748Smckusick 213745748Smckusick new_share_entry = 213845748Smckusick vm_map_entry_create(new_share_map); 213945748Smckusick *new_share_entry = *old_entry; 214061005Shibler new_share_entry->wired_count = 0; 214145748Smckusick 214245748Smckusick /* 214345748Smckusick * Insert the entry into the new sharing 214445748Smckusick * map 214545748Smckusick */ 214645748Smckusick 214745748Smckusick vm_map_entry_link(new_share_map, 214845748Smckusick new_share_map->header.prev, 214945748Smckusick new_share_entry); 215045748Smckusick 215145748Smckusick /* 215245748Smckusick * Fix up the task map entry to refer 215345748Smckusick * to the sharing map now. 215445748Smckusick */ 215545748Smckusick 215645748Smckusick old_entry->is_a_map = TRUE; 215745748Smckusick old_entry->object.share_map = new_share_map; 215845748Smckusick old_entry->offset = old_entry->start; 215945748Smckusick } 216045748Smckusick 216145748Smckusick /* 216245748Smckusick * Clone the entry, referencing the sharing map. 216345748Smckusick */ 216445748Smckusick 216545748Smckusick new_entry = vm_map_entry_create(new_map); 216645748Smckusick *new_entry = *old_entry; 216761005Shibler new_entry->wired_count = 0; 216845748Smckusick vm_map_reference(new_entry->object.share_map); 216945748Smckusick 217045748Smckusick /* 217145748Smckusick * Insert the entry into the new map -- we 217245748Smckusick * know we're inserting at the end of the new 217345748Smckusick * map. 
217445748Smckusick */ 217545748Smckusick 217645748Smckusick vm_map_entry_link(new_map, new_map->header.prev, 217745748Smckusick new_entry); 217845748Smckusick 217945748Smckusick /* 218045748Smckusick * Update the physical map 218145748Smckusick */ 218245748Smckusick 218345748Smckusick pmap_copy(new_map->pmap, old_map->pmap, 218445748Smckusick new_entry->start, 218545748Smckusick (old_entry->end - old_entry->start), 218645748Smckusick old_entry->start); 218745748Smckusick break; 218845748Smckusick 218945748Smckusick case VM_INHERIT_COPY: 219045748Smckusick /* 219145748Smckusick * Clone the entry and link into the map. 219245748Smckusick */ 219345748Smckusick 219445748Smckusick new_entry = vm_map_entry_create(new_map); 219545748Smckusick *new_entry = *old_entry; 219645748Smckusick new_entry->wired_count = 0; 219748383Skarels new_entry->object.vm_object = NULL; 219845748Smckusick new_entry->is_a_map = FALSE; 219945748Smckusick vm_map_entry_link(new_map, new_map->header.prev, 220045748Smckusick new_entry); 220145748Smckusick if (old_entry->is_a_map) { 220245748Smckusick int check; 220345748Smckusick 220445748Smckusick check = vm_map_copy(new_map, 220545748Smckusick old_entry->object.share_map, 220645748Smckusick new_entry->start, 220745748Smckusick (vm_size_t)(new_entry->end - 220845748Smckusick new_entry->start), 220945748Smckusick old_entry->offset, 221045748Smckusick FALSE, FALSE); 221145748Smckusick if (check != KERN_SUCCESS) 221245748Smckusick printf("vm_map_fork: copy in share_map region failed\n"); 221345748Smckusick } 221445748Smckusick else { 221545748Smckusick vm_map_copy_entry(old_map, new_map, old_entry, 221645748Smckusick new_entry); 221745748Smckusick } 221845748Smckusick break; 221945748Smckusick } 222045748Smckusick old_entry = old_entry->next; 222145748Smckusick } 222245748Smckusick 222345748Smckusick new_map->size = old_map->size; 222445748Smckusick vm_map_unlock(old_map); 222545748Smckusick 222648383Skarels return(vm2); 222745748Smckusick } 222845748Smckusick 222945748Smckusick /* 223045748Smckusick * vm_map_lookup: 223145748Smckusick * 223245748Smckusick * Finds the VM object, offset, and 223345748Smckusick * protection for a given virtual address in the 223445748Smckusick * specified map, assuming a page fault of the 223545748Smckusick * type specified. 223645748Smckusick * 223745748Smckusick * Leaves the map in question locked for read; return 223845748Smckusick * values are guaranteed until a vm_map_lookup_done 223945748Smckusick * call is performed. Note that the map argument 224045748Smckusick * is in/out; the returned map must be used in 224145748Smckusick * the call to vm_map_lookup_done. 224245748Smckusick * 224345748Smckusick * A handle (out_entry) is returned for use in 224445748Smckusick * vm_map_lookup_done, to make that fast. 224545748Smckusick * 224645748Smckusick * If a lookup is requested with "write protection" 224745748Smckusick * specified, the map may be changed to perform virtual 224845748Smckusick * copying operations, although the data referenced will 224945748Smckusick * remain the same. 
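 *
 *	An illustrative caller (a sketch, not taken from this file):
 *
 *		result = vm_map_lookup(&map, vaddr, fault_type,
 *		    &entry, &object, &offset, &prot, &wired, &su);
 *		if (result != KERN_SUCCESS)
 *			return(result);
 *		... use object and offset ...
 *		vm_map_lookup_done(map, entry);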
225045748Smckusick */ 225153357Sbostic int 225245748Smckusick vm_map_lookup(var_map, vaddr, fault_type, out_entry, 225345748Smckusick object, offset, out_prot, wired, single_use) 225445748Smckusick vm_map_t *var_map; /* IN/OUT */ 225545748Smckusick register vm_offset_t vaddr; 225645748Smckusick register vm_prot_t fault_type; 225745748Smckusick 225845748Smckusick vm_map_entry_t *out_entry; /* OUT */ 225945748Smckusick vm_object_t *object; /* OUT */ 226045748Smckusick vm_offset_t *offset; /* OUT */ 226145748Smckusick vm_prot_t *out_prot; /* OUT */ 226245748Smckusick boolean_t *wired; /* OUT */ 226345748Smckusick boolean_t *single_use; /* OUT */ 226445748Smckusick { 226545748Smckusick vm_map_t share_map; 226645748Smckusick vm_offset_t share_offset; 226745748Smckusick register vm_map_entry_t entry; 226845748Smckusick register vm_map_t map = *var_map; 226945748Smckusick register vm_prot_t prot; 227045748Smckusick register boolean_t su; 227145748Smckusick 227245748Smckusick RetryLookup: ; 227345748Smckusick 227445748Smckusick /* 227545748Smckusick * Lookup the faulting address. 227645748Smckusick */ 227745748Smckusick 227845748Smckusick vm_map_lock_read(map); 227945748Smckusick 228045748Smckusick #define RETURN(why) \ 228145748Smckusick { \ 228245748Smckusick vm_map_unlock_read(map); \ 228345748Smckusick return(why); \ 228445748Smckusick } 228545748Smckusick 228645748Smckusick /* 228745748Smckusick * If the map has an interesting hint, try it before calling 228845748Smckusick * full blown lookup routine. 228945748Smckusick */ 229045748Smckusick 229145748Smckusick simple_lock(&map->hint_lock); 229245748Smckusick entry = map->hint; 229345748Smckusick simple_unlock(&map->hint_lock); 229445748Smckusick 229545748Smckusick *out_entry = entry; 229645748Smckusick 229745748Smckusick if ((entry == &map->header) || 229845748Smckusick (vaddr < entry->start) || (vaddr >= entry->end)) { 229945748Smckusick vm_map_entry_t tmp_entry; 230045748Smckusick 230145748Smckusick /* 230245748Smckusick * Entry was either not a valid hint, or the vaddr 230345748Smckusick * was not contained in the entry, so do a full lookup. 230445748Smckusick */ 230545748Smckusick if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 230645748Smckusick RETURN(KERN_INVALID_ADDRESS); 230745748Smckusick 230845748Smckusick entry = tmp_entry; 230945748Smckusick *out_entry = entry; 231045748Smckusick } 231145748Smckusick 231245748Smckusick /* 231345748Smckusick * Handle submaps. 231445748Smckusick */ 231545748Smckusick 231645748Smckusick if (entry->is_sub_map) { 231745748Smckusick vm_map_t old_map = map; 231845748Smckusick 231945748Smckusick *var_map = map = entry->object.sub_map; 232045748Smckusick vm_map_unlock_read(old_map); 232145748Smckusick goto RetryLookup; 232245748Smckusick } 232345748Smckusick 232445748Smckusick /* 232545748Smckusick * Check whether this task is allowed to have 232645748Smckusick * this page. 232745748Smckusick */ 232845748Smckusick 232945748Smckusick prot = entry->protection; 233045748Smckusick if ((fault_type & (prot)) != fault_type) 233145748Smckusick RETURN(KERN_PROTECTION_FAILURE); 233245748Smckusick 233345748Smckusick /* 233445748Smckusick * If this page is not pageable, we have to get 233545748Smckusick * it for all possible accesses. 
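 * (For a wired entry the fault type is upgraded to the entry's
 * full protection, so the page ends up valid for every access
 * the entry allows.)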
233645748Smckusick */ 233745748Smckusick 233845748Smckusick if (*wired = (entry->wired_count != 0)) 233945748Smckusick prot = fault_type = entry->protection; 234045748Smckusick 234145748Smckusick /* 234245748Smckusick * If we don't already have a VM object, track 234345748Smckusick * it down. 234445748Smckusick */ 234545748Smckusick 234645748Smckusick if (su = !entry->is_a_map) { 234745748Smckusick share_map = map; 234845748Smckusick share_offset = vaddr; 234945748Smckusick } 235045748Smckusick else { 235145748Smckusick vm_map_entry_t share_entry; 235245748Smckusick 235345748Smckusick /* 235445748Smckusick * Compute the sharing map, and offset into it. 235545748Smckusick */ 235645748Smckusick 235745748Smckusick share_map = entry->object.share_map; 235845748Smckusick share_offset = (vaddr - entry->start) + entry->offset; 235945748Smckusick 236045748Smckusick /* 236145748Smckusick * Look for the backing store object and offset 236245748Smckusick */ 236345748Smckusick 236445748Smckusick vm_map_lock_read(share_map); 236545748Smckusick 236645748Smckusick if (!vm_map_lookup_entry(share_map, share_offset, 236745748Smckusick &share_entry)) { 236845748Smckusick vm_map_unlock_read(share_map); 236945748Smckusick RETURN(KERN_INVALID_ADDRESS); 237045748Smckusick } 237145748Smckusick entry = share_entry; 237245748Smckusick } 237345748Smckusick 237445748Smckusick /* 237545748Smckusick * If the entry was copy-on-write, we either ... 237645748Smckusick */ 237745748Smckusick 237845748Smckusick if (entry->needs_copy) { 237945748Smckusick /* 238045748Smckusick * If we want to write the page, we may as well 238145748Smckusick * handle that now since we've got the sharing 238245748Smckusick * map locked. 238345748Smckusick * 238445748Smckusick * If we don't need to write the page, we just 238545748Smckusick * demote the permissions allowed. 238645748Smckusick */ 238745748Smckusick 238845748Smckusick if (fault_type & VM_PROT_WRITE) { 238945748Smckusick /* 239045748Smckusick * Make a new object, and place it in the 239145748Smckusick * object chain. Note that no new references 239245748Smckusick * have appeared -- one just moved from the 239345748Smckusick * share map to the new object. 239445748Smckusick */ 239545748Smckusick 239645748Smckusick if (lock_read_to_write(&share_map->lock)) { 239745748Smckusick if (share_map != map) 239845748Smckusick vm_map_unlock_read(map); 239945748Smckusick goto RetryLookup; 240045748Smckusick } 240145748Smckusick 240245748Smckusick vm_object_shadow( 240345748Smckusick &entry->object.vm_object, 240445748Smckusick &entry->offset, 240545748Smckusick (vm_size_t) (entry->end - entry->start)); 240645748Smckusick 240745748Smckusick entry->needs_copy = FALSE; 240845748Smckusick 240945748Smckusick lock_write_to_read(&share_map->lock); 241045748Smckusick } 241145748Smckusick else { 241245748Smckusick /* 241345748Smckusick * We're attempting to read a copy-on-write 241445748Smckusick * page -- don't allow writes. 241545748Smckusick */ 241645748Smckusick 241745748Smckusick prot &= (~VM_PROT_WRITE); 241845748Smckusick } 241945748Smckusick } 242045748Smckusick 242145748Smckusick /* 242245748Smckusick * Create an object if necessary. 
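 * (An entry with no backing object gets a new object covering
 * its entire range, and the entry's offset is reset to zero.)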
242345748Smckusick */ 242448383Skarels if (entry->object.vm_object == NULL) { 242545748Smckusick 242645748Smckusick if (lock_read_to_write(&share_map->lock)) { 242745748Smckusick if (share_map != map) 242845748Smckusick vm_map_unlock_read(map); 242945748Smckusick goto RetryLookup; 243045748Smckusick } 243145748Smckusick 243245748Smckusick entry->object.vm_object = vm_object_allocate( 243345748Smckusick (vm_size_t)(entry->end - entry->start)); 243445748Smckusick entry->offset = 0; 243545748Smckusick lock_write_to_read(&share_map->lock); 243645748Smckusick } 243745748Smckusick 243845748Smckusick /* 243945748Smckusick * Return the object/offset from this entry. If the entry 244045748Smckusick * was copy-on-write or empty, it has been fixed up. 244145748Smckusick */ 244245748Smckusick 244345748Smckusick *offset = (share_offset - entry->start) + entry->offset; 244445748Smckusick *object = entry->object.vm_object; 244545748Smckusick 244645748Smckusick /* 244745748Smckusick * Return whether this is the only map sharing this data. 244845748Smckusick */ 244945748Smckusick 245045748Smckusick if (!su) { 245145748Smckusick simple_lock(&share_map->ref_lock); 245245748Smckusick su = (share_map->ref_count == 1); 245345748Smckusick simple_unlock(&share_map->ref_lock); 245445748Smckusick } 245545748Smckusick 245645748Smckusick *out_prot = prot; 245745748Smckusick *single_use = su; 245845748Smckusick 245945748Smckusick return(KERN_SUCCESS); 246045748Smckusick 246145748Smckusick #undef RETURN 246245748Smckusick } 246345748Smckusick 246445748Smckusick /* 246545748Smckusick * vm_map_lookup_done: 246645748Smckusick * 246745748Smckusick * Releases locks acquired by a vm_map_lookup 246845748Smckusick * (according to the handle returned by that lookup). 246945748Smckusick */ 247045748Smckusick 2471*68162Scgd void 2472*68162Scgd vm_map_lookup_done(map, entry) 247345748Smckusick register vm_map_t map; 247445748Smckusick vm_map_entry_t entry; 247545748Smckusick { 247645748Smckusick /* 247745748Smckusick * If this entry references a map, unlock it first. 247845748Smckusick */ 247945748Smckusick 248045748Smckusick if (entry->is_a_map) 248145748Smckusick vm_map_unlock_read(entry->object.share_map); 248245748Smckusick 248345748Smckusick /* 248445748Smckusick * Unlock the main-level map 248545748Smckusick */ 248645748Smckusick 248745748Smckusick vm_map_unlock_read(map); 248845748Smckusick } 248945748Smckusick 249045748Smckusick /* 249145748Smckusick * Routine: vm_map_simplify 249245748Smckusick * Purpose: 249345748Smckusick * Attempt to simplify the map representation in 249445748Smckusick * the vicinity of the given starting address. 249545748Smckusick * Note: 249645748Smckusick * This routine is intended primarily to keep the 249745748Smckusick * kernel maps more compact -- they generally don't 249845748Smckusick * benefit from the "expand a map entry" technology 249945748Smckusick * at allocation time because the adjacent entry 250045748Smckusick * is often wired down. 
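 *
 *	Two adjacent entries are coalesced only if they are
 *	contiguous in both address space and object offset,
 *	reference the same object, and agree in all attributes
 *	(protection, inheritance, wiring, copy-on-write state);
 *	the earlier entry is then extended and the later one
 *	discarded.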
250145748Smckusick */ 2502*68162Scgd void 2503*68162Scgd vm_map_simplify(map, start) 250445748Smckusick vm_map_t map; 250545748Smckusick vm_offset_t start; 250645748Smckusick { 250745748Smckusick vm_map_entry_t this_entry; 250845748Smckusick vm_map_entry_t prev_entry; 250945748Smckusick 251045748Smckusick vm_map_lock(map); 251145748Smckusick if ( 251245748Smckusick (vm_map_lookup_entry(map, start, &this_entry)) && 251345748Smckusick ((prev_entry = this_entry->prev) != &map->header) && 251445748Smckusick 251545748Smckusick (prev_entry->end == start) && 251645748Smckusick (map->is_main_map) && 251745748Smckusick 251845748Smckusick (prev_entry->is_a_map == FALSE) && 251945748Smckusick (prev_entry->is_sub_map == FALSE) && 252045748Smckusick 252145748Smckusick (this_entry->is_a_map == FALSE) && 252245748Smckusick (this_entry->is_sub_map == FALSE) && 252345748Smckusick 252445748Smckusick (prev_entry->inheritance == this_entry->inheritance) && 252545748Smckusick (prev_entry->protection == this_entry->protection) && 252645748Smckusick (prev_entry->max_protection == this_entry->max_protection) && 252745748Smckusick (prev_entry->wired_count == this_entry->wired_count) && 252845748Smckusick 252945748Smckusick (prev_entry->copy_on_write == this_entry->copy_on_write) && 253045748Smckusick (prev_entry->needs_copy == this_entry->needs_copy) && 253145748Smckusick 253245748Smckusick (prev_entry->object.vm_object == this_entry->object.vm_object) && 253345748Smckusick ((prev_entry->offset + (prev_entry->end - prev_entry->start)) 253445748Smckusick == this_entry->offset) 253545748Smckusick ) { 253645748Smckusick if (map->first_free == this_entry) 253745748Smckusick map->first_free = prev_entry; 253845748Smckusick 253945748Smckusick SAVE_HINT(map, prev_entry); 254045748Smckusick vm_map_entry_unlink(map, this_entry); 254145748Smckusick prev_entry->end = this_entry->end; 254245748Smckusick vm_object_deallocate(this_entry->object.vm_object); 254345748Smckusick vm_map_entry_dispose(map, this_entry); 254445748Smckusick } 254545748Smckusick vm_map_unlock(map); 254645748Smckusick } 254745748Smckusick 254845748Smckusick /* 254945748Smckusick * vm_map_print: [ debug ] 255045748Smckusick */ 2551*68162Scgd void 2552*68162Scgd vm_map_print(map, full) 255345748Smckusick register vm_map_t map; 255445748Smckusick boolean_t full; 255545748Smckusick { 255645748Smckusick register vm_map_entry_t entry; 255745748Smckusick extern int indent; 255845748Smckusick 255945748Smckusick iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n", 256045748Smckusick (map->is_main_map ? 
"Task" : "Share"), 256145748Smckusick (int) map, (int) (map->pmap), map->ref_count, map->nentries, 256245748Smckusick map->timestamp); 256345748Smckusick 256445748Smckusick if (!full && indent) 256545748Smckusick return; 256645748Smckusick 256745748Smckusick indent += 2; 256845748Smckusick for (entry = map->header.next; entry != &map->header; 256945748Smckusick entry = entry->next) { 257045748Smckusick iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", 257145748Smckusick (int) entry, (int) entry->start, (int) entry->end); 257245748Smckusick if (map->is_main_map) { 257345748Smckusick static char *inheritance_name[4] = 257445748Smckusick { "share", "copy", "none", "donate_copy"}; 257545748Smckusick printf("prot=%x/%x/%s, ", 257645748Smckusick entry->protection, 257745748Smckusick entry->max_protection, 257845748Smckusick inheritance_name[entry->inheritance]); 257945748Smckusick if (entry->wired_count != 0) 258045748Smckusick printf("wired, "); 258145748Smckusick } 258245748Smckusick 258345748Smckusick if (entry->is_a_map || entry->is_sub_map) { 258445748Smckusick printf("share=0x%x, offset=0x%x\n", 258545748Smckusick (int) entry->object.share_map, 258645748Smckusick (int) entry->offset); 258745748Smckusick if ((entry->prev == &map->header) || 258845748Smckusick (!entry->prev->is_a_map) || 258945748Smckusick (entry->prev->object.share_map != 259045748Smckusick entry->object.share_map)) { 259145748Smckusick indent += 2; 259245748Smckusick vm_map_print(entry->object.share_map, full); 259345748Smckusick indent -= 2; 259445748Smckusick } 259545748Smckusick 259645748Smckusick } 259745748Smckusick else { 259845748Smckusick printf("object=0x%x, offset=0x%x", 259945748Smckusick (int) entry->object.vm_object, 260045748Smckusick (int) entry->offset); 260145748Smckusick if (entry->copy_on_write) 260245748Smckusick printf(", copy (%s)", 260345748Smckusick entry->needs_copy ? "needed" : "done"); 260445748Smckusick printf("\n"); 260545748Smckusick 260645748Smckusick if ((entry->prev == &map->header) || 260745748Smckusick (entry->prev->is_a_map) || 260845748Smckusick (entry->prev->object.vm_object != 260945748Smckusick entry->object.vm_object)) { 261045748Smckusick indent += 2; 261145748Smckusick vm_object_print(entry->object.vm_object, full); 261245748Smckusick indent -= 2; 261345748Smckusick } 261445748Smckusick } 261545748Smckusick } 261645748Smckusick indent -= 2; 261745748Smckusick } 2618