/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_map.c	7.3 (Berkeley) 04/21/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include "param.h"
#include "malloc.h"
#include "vm.h"
#include "vm_page.h"
#include "vm_object.h"

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	In order to properly represent the sharing of virtual
 *	memory regions among maps, the map structure is bi-level.
 *	Top-level ("address") maps refer to regions of sharable
 *	virtual memory.  These regions are implemented as
 *	("sharing") maps, which then refer to the actual virtual
 *	memory objects.  When two address maps "share" memory,
 *	their top-level maps both have references to the same
 *	sharing map.  When memory is virtual-copied from one
 *	address map to another, the references in the sharing
 *	maps are actually copied -- no copying occurs at the
 *	virtual memory object level.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one sharing map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map -- this means that
 *	shadow object creation can be delayed until a write operation
 *	occurs.
 */
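/*
 *	Illustrative sketch, not part of the original source: how a
 *	kernel client might carve an anonymous region out of a map with
 *	the routines defined below and later return it.  The function
 *	name and the four-page size are hypothetical; PAGE_SIZE is
 *	assumed from the VM headers.
 */
#ifdef notdef
void
vm_map_example(map)
	vm_map_t	map;
{
	vm_offset_t	addr;
	vm_size_t	size = 4 * PAGE_SIZE;

	addr = vm_map_min(map);
	/* first-fit search from addr; a NULL object means zero-fill */
	if (vm_map_find(map, NULL, (vm_offset_t)0, &addr, size, TRUE) !=
	    KERN_SUCCESS)
		return;
	/* ... use the range [addr, addr + size) ... */
	(void) vm_map_remove(map, addr, addr + size);
}
#endif /* notdef */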

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
vm_map_entry_t	kentry_free;
vm_map_t	kmap_free;

void vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries
	 * with the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}
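/*
 *	Illustrative sketch, not part of the original source: the static
 *	area handed to vm_map_startup() holds MAX_KMAP map structures
 *	followed by a pool of map entries.  The bootstrap caller, the
 *	allocator, and the MAX_KMAPENT constant shown here are
 *	assumptions; only the sizing arithmetic is the point.
 */
#ifdef notdef
	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data = (vm_offset_t) bootstrap_alloc(kentry_data_size);
	vm_map_startup();
#endif /* notdef */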

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (--vm->vm_refcnt == 0) {
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold,
		 * then call the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	}
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t vm_map_create(pmap, min, max, pageable)
	pmap_t		pmap;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	register vm_map_t	result;
	extern vm_map_t		kernel_map, kmem_map;

	if (kmem_map == NULL) {
		if ((result = kmap_free) == NULL)
			panic("vm_map_create: out of maps");
		kmap_free = (vm_map_t) result->header.next;
	} else
		MALLOC(result, vm_map_t, sizeof(struct vm_map),
		       M_VMMAP, M_WAITOK);

	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return(result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t	min, max;
	boolean_t	pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lock_init(&map->lock, TRUE);
	simple_lock_init(&map->ref_lock);
	simple_lock_init(&map->hint_lock);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
vm_map_entry_t vm_map_entry_create(map)
	vm_map_t	map;
{
	vm_map_entry_t	entry;
	extern vm_map_t	kernel_map, kmem_map, mb_map;

	if (map == kernel_map || map == kmem_map || map == mb_map) {
		if ((entry = kentry_free) != NULL)
			kentry_free = kentry_free->next;
	} else
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		       M_VMMAPENT, M_WAITOK);
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
void vm_map_entry_dispose(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
	extern vm_map_t	kernel_map, kmem_map, mb_map;

	if (map == kernel_map || map == kmem_map || map == mb_map) {
		entry->next = kentry_free;
		kentry_free = entry;
	} else
		FREE(entry, M_VMMAPENT);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
#define	vm_map_entry_link(map, after_where, entry) \
		{ \
		(map)->nentries++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
		}
#define	vm_map_entry_unlink(map, entry) \
		{ \
		(map)->nentries--; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
		}
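/*
 *	Illustrative sketch, not part of the original source: the entry
 *	list is circular with &map->header as its sentinel, so a full
 *	traversal looks like the hypothetical helper below.
 */
#ifdef notdef
void
vm_map_print_ranges(map)
	vm_map_t	map;
{
	register vm_map_entry_t	entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		printf("[0x%x,0x%x)\n", entry->start, entry->end);
}
#endif /* notdef */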

/*
 *	vm_map_reference:
 *
 *	Creates another valid reference to the given map.
 */
void vm_map_reference(map)
	register vm_map_t	map;
{
	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	map->ref_count++;
	simple_unlock(&map->ref_lock);
}

/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void vm_map_deallocate(map)
	register vm_map_t	map;
{
	register int	c;

	if (map == NULL)
		return;

	simple_lock(&map->ref_lock);
	c = --map->ref_count;
	simple_unlock(&map->ref_lock);

	if (c > 0) {
		return;
	}

	/*
	 *	Lock the map, to wait out all other references
	 *	to it.
	 */

	vm_map_lock(map);

	(void) vm_map_delete(map, map->min_offset, map->max_offset);

	pmap_destroy(map->pmap);

	FREE(map, M_VMMAP);
}

/*
 *	vm_map_insert:	[ internal use only ]
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 */
vm_map_insert(map, object, offset, start, end)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	start;
	vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;
	register vm_map_entry_t	prev_entry;
	vm_map_entry_t		temp_entry;

	/*
	 *	Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return(KERN_INVALID_ADDRESS);

	/*
	 *	Find the entry prior to the proposed
	 *	starting address; if it's part of an
	 *	existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return(KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 *	Assert that the next entry doesn't overlap the
	 *	end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return(KERN_NO_SPACE);

	/*
	 *	See if we can avoid creating a new entry by
	 *	extending one of our neighbors.
	 */

	if (object == NULL) {
		if ((prev_entry != &map->header) &&
		    (prev_entry->end == start) &&
		    (map->is_main_map) &&
		    (prev_entry->is_a_map == FALSE) &&
		    (prev_entry->is_sub_map == FALSE) &&
		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == VM_PROT_DEFAULT) &&
		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
		    (prev_entry->wired_count == 0)) {

			if (vm_object_coalesce(prev_entry->object.vm_object,
					NULL,
					prev_entry->offset,
					(vm_offset_t) 0,
					(vm_size_t)(prev_entry->end
						     - prev_entry->start),
					(vm_size_t)(end - prev_entry->end))) {
				/*
				 *	Coalesced the two objects - can extend
				 *	the previous map entry to include the
				 *	new range.
				 */
				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				return(KERN_SUCCESS);
			}
		}
	}

	/*
	 *	Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->is_a_map = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->copy_on_write = FALSE;
	new_entry->needs_copy = FALSE;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = VM_PROT_DEFAULT;
		new_entry->max_protection = VM_PROT_DEFAULT;
		new_entry->wired_count = 0;
	}

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 *	Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	return(KERN_SUCCESS);
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		simple_lock(&(map)->hint_lock); \
		(map)->hint = (value); \
		simple_unlock(&(map)->hint_lock);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t vm_map_lookup_entry(map, address, entry)
	register vm_map_t	map;
	register vm_offset_t	address;
	vm_map_entry_t		*entry;		/* OUT */
{
	register vm_map_entry_t	cur;
	register vm_map_entry_t	last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
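/*
 *	Illustrative sketch, not part of the original source: the boolean
 *	result of vm_map_lookup_entry() distinguishes a hit inside an
 *	entry from a hole; either way *entry names the containing (or
 *	preceding) entry.  The surrounding code is hypothetical and
 *	assumes the map is already locked.
 */
#ifdef notdef
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, addr, &entry))
		printf("0x%x lies in [0x%x,0x%x)\n",
		    addr, entry->start, entry->end);
	else
		printf("0x%x falls in a hole; preceding entry ends at 0x%x\n",
		    addr,
		    entry == &map->header ? map->min_offset : entry->end);
#endif /* notdef */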

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 */
vm_map_find(map, object, offset, addr, length, find_space)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	length;
	boolean_t	find_space;
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	int			result;

	start = *addr;

	vm_map_lock(map);

	if (find_space) {
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset) {
			vm_map_unlock(map);
			return (KERN_NO_SPACE);
		}

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		if (start == map->min_offset) {
			if ((entry = map->first_free) != &map->header)
				start = entry->end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = start + length;

			if ((end > map->max_offset) || (end < start)) {
				vm_map_unlock(map);
				return (KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->next;
			if (next == &map->header)
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->end;
		}
		*addr = start;

		SAVE_HINT(map, entry);
	}

	result = vm_map_insert(map, object, offset, start, start + length);

	vm_map_unlock(map);
	return(result);
}

/*
 *	vm_map_simplify_entry:	[ internal use only ]
 *
 *	Simplify the given map entry by:
 *		removing extra sharing maps
 *		[XXX maybe later] merging with a neighbor
 */
void vm_map_simplify_entry(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
#ifdef	lint
	map++;
#endif	/* lint */

	/*
	 *	If this entry corresponds to a sharing map, then
	 *	see if we can remove the level of indirection.
	 *	If it's not a sharing map, then it points to
	 *	a VM object, so see if we can merge with either
	 *	of our neighbors.
	 */

	if (entry->is_sub_map)
		return;
	if (entry->is_a_map) {
#if	0
		vm_map_t	my_share_map;
		int		count;

		my_share_map = entry->object.share_map;
		simple_lock(&my_share_map->ref_lock);
		count = my_share_map->ref_count;
		simple_unlock(&my_share_map->ref_lock);

		if (count == 1) {
			/* Can move the region from
			 * entry->start to entry->end (+ entry->offset)
			 * in my_share_map into place of entry.
			 * Later.
			 */
		}
#endif	/* 0 */
	}
	else {
		/*
		 *	Try to merge with our neighbors.
		 *
		 *	Conditions for merge are:
		 *
		 *	1.  entries are adjacent.
		 *	2.  both entries point to objects
		 *	    with null pagers.
		 *
		 *	If a merge is possible, we replace the two
		 *	entries with a single entry, then merge
		 *	the two objects into a single object.
		 *
		 *	Now, all that is left to do is write the
		 *	code!
		 */
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void _vm_map_clip_start(map, entry, start)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	See if we can simplify this entry first
	 */

	vm_map_simplify_entry(map, entry);

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

void _vm_map_clip_end();
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void _vm_map_clip_end(map, entry, end)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
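/*
 *	Illustrative sketch, not part of the original source: clipping an
 *	entry spanning [0x1000,0x4000) at 0x2000 and then at 0x3000
 *	yields three entries; the middle one exactly covers
 *	[0x2000,0x3000) and can be modified in isolation.  The addresses
 *	are hypothetical.
 */
#ifdef notdef
	vm_map_clip_start(map, entry, (vm_offset_t)0x2000);
	vm_map_clip_end(map, entry, (vm_offset_t)0x3000);
	/* entry now describes exactly [0x2000,0x3000) */
#endif /* notdef */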

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
vm_map_submap(map, start, end, submap)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	vm_map_t		submap;
{
	vm_map_entry_t		entry;
	register int		result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    (!entry->is_a_map) &&
	    (entry->object.vm_object == NULL) &&
	    (!entry->copy_on_write)) {
		entry->is_a_map = FALSE;
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
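/*
 *	Illustrative sketch, not part of the original source (modeled on
 *	how kernel submaps are typically set up): reserve a range in the
 *	parent, create a child map over it, and install the child with
 *	vm_map_submap().  The function name and error convention are
 *	hypothetical.
 */
#ifdef notdef
vm_map_t
vm_map_submap_example(parent, size)
	vm_map_t	parent;
	vm_size_t	size;
{
	vm_offset_t	min;
	vm_map_t	sub;

	min = vm_map_min(parent);
	if (vm_map_find(parent, NULL, (vm_offset_t)0, &min, size, TRUE) !=
	    KERN_SUCCESS)
		return(NULL);
	sub = vm_map_create(vm_map_pmap(parent), min, min + size, TRUE);
	(void) vm_map_submap(parent, min, min + size, sub);
	return(sub);
}
#endif /* notdef */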

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
vm_map_protect(map, start, end, new_prot, set_max)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	new_prot;
	register boolean_t	set_max;
{
	register vm_map_entry_t	current;
	vm_map_entry_t		entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->next;

	/*
	 *	Make a first pass to check for protection
	 *	violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->is_sub_map) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}

		current = current->next;
	}

	/*
	 *	Go back and fix up protections.
	 *	[Note that clipping is not necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
				(current->max_protection = new_prot) &
					old_prot;
		else
			current->protection = new_prot;

		/*
		 *	Update physical map if necessary.
		 *	Worry about copy-on-write here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {

#define	MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define	max(a,b)	((a) > (b) ? (a) : (b))

			if (current->is_a_map) {
				vm_map_entry_t	share_entry;
				vm_offset_t	share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
						current->object.share_map,
						current->offset,
						&share_entry);
				share_end = current->offset +
					(current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
					(share_entry->start < share_end)) {

					pmap_protect(map->pmap,
						(max(share_entry->start,
							current->offset) -
							current->offset +
							current->start),
						min(share_entry->end,
							share_end) -
							current->offset +
							current->start,
						current->protection &
							MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			}
			else
				pmap_protect(map->pmap, current->start,
					current->end,
					current->protection & MASK(current));
#undef	max
#undef	MASK
		}
		current = current->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_inherit_t	new_inheritance;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
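/*
 *	Illustrative sketch, not part of the original source: a caller
 *	might make a range read-only and arrange for children to share
 *	it across vm_map_fork.  The map and bounds are hypothetical.
 */
#ifdef notdef
	(void) vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
	(void) vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
#endif /* notdef */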

/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register boolean_t	new_pageable;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 *	Only one pageability change may take place at one
	 *	time, since vm_fault assumes it will be called
	 *	only once for each wiring/unwiring.  Therefore, we
	 *	have to make sure we're actually changing the pageability
	 *	for the entire region.  We do so before making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = temp_entry->next;
	temp_entry = entry;

	/*
	 *	Actions are rather different for wiring and unwiring,
	 *	so we have two separate cases.
	 */

	if (new_pageable) {

		/*
		 *	Unwiring.  First ensure that the range to be
		 *	unwired is really wired down.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 *	Now decrement the wiring count for each region.
		 *	If a region becomes completely unwired,
		 *	unwire its physical pages and mappings.
		 */
		lock_set_recursive(&map->lock);

		entry = temp_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		lock_clear_recursive(&map->lock);
	}

	else {
		/*
		 *	Wiring.  We must do this in two passes:
		 *
		 *	1.  Holding the write lock, we increment the
		 *	    wiring count.  For any area that is not already
		 *	    wired, we create any shadow objects that need
		 *	    to be created.
		 *
		 *	2.  We downgrade to a read lock, and call
		 *	    vm_fault_wire to fault in the pages for any
		 *	    newly wired area (wired_count is 1).
		 *
		 *	Downgrading to a read lock for vm_fault_wire avoids
		 *	a possible deadlock with another thread that may have
		 *	faulted on one of the pages to be wired (it would mark
		 *	the page busy, blocking us, then in turn block on the
		 *	map lock that we hold).  Because of problems in the
		 *	recursive lock package, we cannot upgrade to a write
		 *	lock in vm_map_lookup.  Thus, any actions that require
		 *	the write lock must be done beforehand.  Because we
		 *	keep the read lock on the map, the copy-on-write status
		 *	of the entries we modify here cannot change.
		 */

		/*
		 *	Pass 1.
		 */
		entry = temp_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			if (entry->wired_count == 1) {

				/*
				 *	Perform actions of vm_map_lookup that
				 *	need the write lock on the map: create
				 *	a shadow object for a copy-on-write
				 *	region, or an object for a zero-fill
				 *	region.
				 *
				 *	We don't have to do this for entries
				 *	that point to sharing maps, because we
				 *	won't hold the lock on the sharing map.
				 */
				if (!entry->is_a_map) {
					if (entry->needs_copy &&
					    ((entry->protection &
					     VM_PROT_WRITE) != 0)) {

						vm_object_shadow(
						    &entry->object.vm_object,
						    &entry->offset,
						    (vm_size_t)(entry->end
							- entry->start));
						entry->needs_copy = FALSE;
					}
					else if (entry->object.vm_object ==
					    NULL) {
						entry->object.vm_object =
						    vm_object_allocate(
							(vm_size_t)(entry->end
							- entry->start));
						entry->offset = (vm_offset_t)0;
					}
				}
			}

			entry = entry->next;
		}

		/*
		 *	Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel threads are well-behaved, and therefore will
		 * not do anything destructive to this region of the map
		 * while we have it unlocked.  We cannot trust user threads
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		}
		else {
			lock_set_recursive(&map->lock);
			lock_write_to_read(&map->lock);
		}

		entry = temp_entry;
		while (entry != &map->header && entry->start < end) {
			if (entry->wired_count == 1) {
				vm_fault_wire(map, entry->start, entry->end);
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		else {
			lock_clear_recursive(&map->lock);
		}
	}

	vm_map_unlock(map);

	return(KERN_SUCCESS);
}
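/*
 *	Illustrative sketch, not part of the original source: wiring a
 *	buffer for the duration of an I/O operation, then unwiring it.
 *	FALSE means "not pageable" (wire); TRUE unwires.  The error
 *	convention shown is hypothetical.
 */
#ifdef notdef
	if (vm_map_pageable(map, addr, addr + size, FALSE) != KERN_SUCCESS)
		return(EFAULT);
	/* ... perform I/O on the wired range ... */
	(void) vm_map_pageable(map, addr, addr + size, TRUE);
#endif /* notdef */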

/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
void vm_map_entry_unwire(map, entry)
	vm_map_t		map;
	register vm_map_entry_t	entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void vm_map_entry_delete(map, entry)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 */
vm_map_delete(map, start, end)
	register vm_map_t	map;
	vm_offset_t		start;
	register vm_offset_t	end;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		first_entry;

	/*
	 *	Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 *	Fix the lookup hint now, rather than each
		 *	time through the loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 *	Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 *	Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t		next;
		register vm_offset_t	s, e;
		register vm_object_t	object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 *	Unwire before removing addresses from the pmap;
		 *	otherwise, unwiring will put the entries back in
		 *	the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 *	If this is a sharing map, we must remove
		 *	*all* references to this data, since we can't
		 *	find all of the physical maps which are sharing
		 *	it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, entry->offset,
					entry->offset + (e - s));
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
					entry->offset,
					entry->offset + (e - s));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 *	Delete the entry (which may delete the object)
		 *	only after removing all pmap entries pointing
		 *	to its pages.  (Otherwise, its page frames may
		 *	be reallocated, and any modify bits will be
		 *	set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return(KERN_SUCCESS);
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
vm_map_remove(map, start, end)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register int	result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	return(result);
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return(FALSE);
		}

		/*
		 *	No holes allowed!
		 */

		if (start < entry->start) {
			return(FALSE);
		}

		/*
		 *	Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return(FALSE);
		}

		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return(TRUE);
}
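/*
 *	Illustrative sketch, not part of the original source: a caller
 *	verifying that an entire range is readable before starting a
 *	copy.  The error convention shown is hypothetical.
 */
#ifdef notdef
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_READ))
		return(EFAULT);
#endif /* notdef */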
/*
 *	vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t		src_map, dst_map;
	register vm_map_entry_t	src_entry, dst_entry;
{
	vm_object_t	temp_object;

	if (src_entry->is_sub_map || dst_entry->is_sub_map)
		return;

	if (dst_entry->object.vm_object != NULL &&
	    !dst_entry->object.vm_object->internal)
		printf("vm_map_copy_entry: copying over permanent data!\n");

	/*
	 *	If our destination map was wired down,
	 *	unwire it now.
	 */

	if (dst_entry->wired_count != 0)
		vm_map_entry_unwire(dst_map, dst_entry);

	/*
	 *	If we're dealing with a sharing map, we
	 *	must remove the destination pages from
	 *	all maps (since we cannot know which maps
	 *	this sharing map belongs in).
	 */

	if (dst_map->is_main_map)
		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
	else
		vm_object_pmap_remove(dst_entry->object.vm_object,
			dst_entry->offset,
			dst_entry->offset +
				(dst_entry->end - dst_entry->start));

	if (src_entry->wired_count == 0) {

		boolean_t	src_needs_copy;

		/*
		 *	If the source entry is marked needs_copy,
		 *	it is already write-protected.
		 */
		if (!src_entry->needs_copy) {

			boolean_t	su;

			/*
			 *	If the source entry has only one mapping,
			 *	we can just protect the virtual address
			 *	range.
			 */
			if (!(su = src_map->is_main_map)) {
				simple_lock(&src_map->ref_lock);
				su = (src_map->ref_count == 1);
				simple_unlock(&src_map->ref_lock);
			}

			if (su) {
				pmap_protect(src_map->pmap,
					src_entry->start,
					src_entry->end,
					src_entry->protection & ~VM_PROT_WRITE);
			}
			else {
				vm_object_pmap_copy(src_entry->object.vm_object,
					src_entry->offset,
					src_entry->offset + (src_entry->end
							- src_entry->start));
			}
		}

		/*
		 *	Make a copy of the object.
		 */
		temp_object = dst_entry->object.vm_object;
		vm_object_copy(src_entry->object.vm_object,
				src_entry->offset,
				(vm_size_t)(src_entry->end -
					    src_entry->start),
				&dst_entry->object.vm_object,
				&dst_entry->offset,
				&src_needs_copy);
		/*
		 *	If we didn't get a copy-object now, mark the
		 *	source map entry so that a shadow will be created
		 *	to hold its changed pages.
		 */
		if (src_needs_copy)
			src_entry->needs_copy = TRUE;

		/*
		 *	The destination always needs to have a shadow
		 *	created.
		 */
		dst_entry->needs_copy = TRUE;

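		/*
		 * Editorial note: needs_copy and copy_on_write (set just
		 * below) are distinct: copy_on_write records that the
		 * region is logically copied, while needs_copy records
		 * that the shadow object has not yet been built.  The
		 * shadow is created lazily, in vm_map_lookup, when a
		 * write fault finally arrives.
		 */
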
		/*
		 *	Mark the entries copy-on-write, so that write-enabling
		 *	the entry won't make copy-on-write pages writable.
		 */
		src_entry->copy_on_write = TRUE;
		dst_entry->copy_on_write = TRUE;
		/*
		 *	Get rid of the old object.
		 */
		vm_object_deallocate(temp_object);

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
			dst_entry->end - dst_entry->start, src_entry->start);
	}
	else {
		/*
		 *	Of course, wired-down pages can't be set copy-on-write.
		 *	Cause wired pages to be copied into the new
		 *	map by simulating faults (the new pages are
		 *	pageable).
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 *	vm_map_copy:
 *
 *	Perform a virtual memory copy from the source
 *	address map/range to the destination map/range.
 *
 *	If src_destroy or dst_alloc is requested,
 *	the source and destination regions should be
 *	disjoint, not only in the top-level map, but
 *	in the sharing maps as well.  [The best way
 *	to guarantee this is to use a new intermediate
 *	map to make copies.  This also reduces map
 *	fragmentation.]
 */
vm_map_copy(dst_map, src_map,
			  dst_addr, len, src_addr,
			  dst_alloc, src_destroy)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
	boolean_t	dst_alloc;
	boolean_t	src_destroy;
{
	register
	vm_map_entry_t	src_entry;
	register
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	tmp_entry;
	vm_offset_t	src_start;
	vm_offset_t	src_end;
	vm_offset_t	dst_start;
	vm_offset_t	dst_end;
	vm_offset_t	src_clip;
	vm_offset_t	dst_clip;
	int		result;
	boolean_t	old_src_destroy;

	/*
	 *	XXX While we figure out why src_destroy screws up,
	 *	we'll do it by explicitly vm_map_delete'ing at the end.
	 */

	old_src_destroy = src_destroy;
	src_destroy = FALSE;

	/*
	 *	Compute start and end of region in both maps
	 */

	src_start = src_addr;
	src_end = src_start + len;
	dst_start = dst_addr;
	dst_end = dst_start + len;

	/*
	 *	Check that the region can exist in both source
	 *	and destination.
	 */

	if ((dst_end < dst_start) || (src_end < src_start))
		return(KERN_NO_SPACE);

	/*
	 *	Lock the maps in question -- we avoid deadlock
	 *	by ordering lock acquisition by map address
	 *	(lowest address first).
	 */

	if (src_map == dst_map) {
		vm_map_lock(src_map);
	}
	else if ((int) src_map < (int) dst_map) {
		vm_map_lock(src_map);
		vm_map_lock(dst_map);
	} else {
		vm_map_lock(dst_map);
		vm_map_lock(src_map);
	}

	result = KERN_SUCCESS;

	/*
	 *	Check protections... source must be completely readable and
	 *	destination must be completely writable.  [Note that if we're
	 *	allocating the destination region, we don't have to worry
	 *	about protection, but instead about whether the region
	 *	exists.]
	 */

	if (src_map->is_main_map && dst_map->is_main_map) {
		if (!vm_map_check_protection(src_map, src_start, src_end,
					VM_PROT_READ)) {
			result = KERN_PROTECTION_FAILURE;
			goto Return;
		}

		if (dst_alloc) {
			/* XXX Consider making this a vm_map_find instead */
			if ((result = vm_map_insert(dst_map, NULL,
					(vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
				goto Return;
		}
		else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
					VM_PROT_WRITE)) {
			result = KERN_PROTECTION_FAILURE;
			goto Return;
		}
	}

	/*
	 *	Find the start entries and clip.
	 *
	 *	Note that checking protection asserts that the
	 *	lookup cannot fail.
	 *
	 *	Also note that we wait to do the second lookup
	 *	until we have done the first clip, as the clip
	 *	may affect which entry we get!
	 */

	(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
	src_entry = tmp_entry;
	vm_map_clip_start(src_map, src_entry, src_start);

	(void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
	dst_entry = tmp_entry;
	vm_map_clip_start(dst_map, dst_entry, dst_start);

	/*
	 *	If both source and destination entries are the same,
	 *	retry the first lookup, as it may have changed.
	 */

	if (src_entry == dst_entry) {
		(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
		src_entry = tmp_entry;
	}

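	/*
	 * Editorial note: the retry above is needed because clipping the
	 * destination can split the very entry src_entry pointed at when
	 * both lookups returned the same entry, leaving src_entry
	 * describing only part of the source range.
	 */
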
	/*
	 *	If source and destination entries are still the same,
	 *	a null copy is being performed.
	 */

	if (src_entry == dst_entry)
		goto Return;

	/*
	 *	Go through entries until we get to the end of the
	 *	region.
	 */

	while (src_start < src_end) {
		/*
		 *	Clip the entries to the endpoint of the entire region.
		 */

		vm_map_clip_end(src_map, src_entry, src_end);
		vm_map_clip_end(dst_map, dst_entry, dst_end);

		/*
		 *	Clip each entry to the endpoint of the other entry.
		 */

		src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
		vm_map_clip_end(src_map, src_entry, src_clip);

		dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
		vm_map_clip_end(dst_map, dst_entry, dst_clip);

		/*
		 *	Both entries now match in size and relative endpoints.
		 *
		 *	If both entries refer to a VM object, we can
		 *	deal with them now.
		 */

		if (!src_entry->is_a_map && !dst_entry->is_a_map) {
			vm_map_copy_entry(src_map, dst_map, src_entry,
						dst_entry);
		}
		else {
			register vm_map_t	new_dst_map;
			vm_offset_t		new_dst_start;
			vm_size_t		new_size;
			vm_map_t		new_src_map;
			vm_offset_t		new_src_start;

			/*
			 *	We have to follow at least one sharing map.
			 */

			new_size = (dst_entry->end - dst_entry->start);

			if (src_entry->is_a_map) {
				new_src_map = src_entry->object.share_map;
				new_src_start = src_entry->offset;
			}
			else {
				new_src_map = src_map;
				new_src_start = src_entry->start;
				lock_set_recursive(&src_map->lock);
			}

			if (dst_entry->is_a_map) {
				vm_offset_t	new_dst_end;

				new_dst_map = dst_entry->object.share_map;
				new_dst_start = dst_entry->offset;

				/*
				 *	Since the destination sharing entries
				 *	will be merely deallocated, we can
				 *	do that now, and replace the region
				 *	with a null object.  [This prevents
				 *	splitting the source map to match
				 *	the form of the destination map.]
				 *	Note that we can only do so if the
				 *	source and destination do not overlap.
				 */

				new_dst_end = new_dst_start + new_size;

				if (new_dst_map != new_src_map) {
					vm_map_lock(new_dst_map);
					(void) vm_map_delete(new_dst_map,
							new_dst_start,
							new_dst_end);
					(void) vm_map_insert(new_dst_map,
							NULL,
							(vm_offset_t) 0,
							new_dst_start,
							new_dst_end);
					vm_map_unlock(new_dst_map);
				}
			}
			else {
				new_dst_map = dst_map;
				new_dst_start = dst_entry->start;
				lock_set_recursive(&dst_map->lock);
			}

			/*
			 *	Recursively copy the sharing map.
			 */

			(void) vm_map_copy(new_dst_map, new_src_map,
				new_dst_start, new_size, new_src_start,
				FALSE, FALSE);

			if (dst_map == new_dst_map)
				lock_clear_recursive(&dst_map->lock);
			if (src_map == new_src_map)
				lock_clear_recursive(&src_map->lock);
		}

		/*
		 *	Update variables for next pass through the loop.
		 */

		src_start = src_entry->end;
		src_entry = src_entry->next;
		dst_start = dst_entry->end;
		dst_entry = dst_entry->next;

		/*
		 *	If the source is to be destroyed, here is the
		 *	place to do it.
		 */

		if (src_destroy && src_map->is_main_map &&
						dst_map->is_main_map)
			vm_map_entry_delete(src_map, src_entry->prev);
	}

	/*
	 *	Update the physical maps as appropriate
	 */

	if (src_map->is_main_map && dst_map->is_main_map) {
		if (src_destroy)
			pmap_remove(src_map->pmap, src_addr, src_addr + len);
	}

	/*
	 *	Unlock the maps
	 */

	Return: ;

	if (old_src_destroy)
		vm_map_delete(src_map, src_addr, src_addr + len);

	vm_map_unlock(src_map);
	if (src_map != dst_map)
		vm_map_unlock(dst_map);

	return(result);
}

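/*
 * Editorial sketch (hypothetical addresses; both maps are assumed
 * unlocked on entry, since vm_map_copy acquires the locks itself in
 * the address order shown above):
 *
 *	result = vm_map_copy(dst_map, src_map, dst_va, len, src_va,
 *			TRUE,		-- dst_alloc: allocate destination
 *			FALSE);		-- src_destroy: keep the source
 *	if (result != KERN_SUCCESS)
 *		... report the error ...
 */
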
/*
 *	vmspace_fork:
 *	Create a new process vmspace structure and vm_map
 *	based on those of an existing process.  The new map
 *	is based on the old map, according to the inheritance
 *	values on the regions in that map.
 *
 *	The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t	old_map = &vm1->vm_map;
	vm_map_t	new_map;
	vm_map_entry_t	old_entry;
	vm_map_entry_t	new_entry;
	pmap_t		new_pmap;

	vm_map_lock(old_map);

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
	    old_map->entries_pageable);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_pmap = &vm2->vm_pmap;	/* XXX */
	new_map = &vm2->vm_map;		/* XXX */

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->is_sub_map)
			panic("vmspace_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 *	If we don't already have a sharing map:
			 */

			if (!old_entry->is_a_map) {
				vm_map_t	new_share_map;
				vm_map_entry_t	new_share_entry;

				/*
				 *	Create a new sharing map
				 */

				new_share_map = vm_map_create(NULL,
							old_entry->start,
							old_entry->end,
							TRUE);
				new_share_map->is_main_map = FALSE;

				/*
				 *	Create the only sharing entry from the
				 *	old task map entry.
				 */

				new_share_entry =
					vm_map_entry_create(new_share_map);
				*new_share_entry = *old_entry;

				/*
				 *	Insert the entry into the new sharing
				 *	map
				 */

				vm_map_entry_link(new_share_map,
						new_share_map->header.prev,
						new_share_entry);

				/*
				 *	Fix up the task map entry to refer
				 *	to the sharing map now.
				 */

				old_entry->is_a_map = TRUE;
				old_entry->object.share_map = new_share_map;
				old_entry->offset = old_entry->start;
			}

			/*
			 *	Clone the entry, referencing the sharing map.
			 */

			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			vm_map_reference(new_entry->object.share_map);

			/*
			 *	Insert the entry into the new map -- we
			 *	know we're inserting at the end of the new
			 *	map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
						new_entry);

			/*
			 *	Update the physical map
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
				new_entry->start,
				(old_entry->end - old_entry->start),
				old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 *	Clone the entry and link into the map.
			 */

			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->is_a_map = FALSE;
			vm_map_entry_link(new_map, new_map->header.prev,
				new_entry);
			if (old_entry->is_a_map) {
				int	check;

				check = vm_map_copy(new_map,
						old_entry->object.share_map,
						new_entry->start,
						(vm_size_t)(new_entry->end -
							new_entry->start),
						old_entry->offset,
						FALSE, FALSE);
				if (check != KERN_SUCCESS)
					printf("vmspace_fork: copy in share_map region failed\n");
			}
			else {
				vm_map_copy_entry(old_map, new_map, old_entry,
					new_entry);
			}
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	vm_map_unlock(old_map);

	return(vm2);
}

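/*
 * Editorial note: the inheritance values handled in the switch above
 * give each region the following semantics in the child:
 *
 *	VM_INHERIT_NONE		region does not appear in the child
 *	VM_INHERIT_SHARE	parent and child share the region
 *				through a common sharing map
 *	VM_INHERIT_COPY		child receives a copy-on-write copy
 */
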
/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
				object, offset, out_prot, wired, single_use)
	vm_map_t		*var_map;	/* IN/OUT */
	register vm_offset_t	vaddr;
	register vm_prot_t	fault_type;

	vm_map_entry_t		*out_entry;	/* OUT */
	vm_object_t		*object;	/* OUT */
	vm_offset_t		*offset;	/* OUT */
	vm_prot_t		*out_prot;	/* OUT */
	boolean_t		*wired;		/* OUT */
	boolean_t		*single_use;	/* OUT */
{
	vm_map_t	share_map;
	vm_offset_t	share_offset;
	register vm_map_entry_t	entry;
	register vm_map_t	map = *var_map;
	register vm_prot_t	prot;
	register boolean_t	su;

	RetryLookup: ;

	/*
	 *	Lookup the faulting address.
	 */

	vm_map_lock_read(map);

#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return(why); \
		}

	/*
	 *	If the map has an interesting hint, try it before calling
	 *	the full-blown lookup routine.
	 */

	simple_lock(&map->hint_lock);
	entry = map->hint;
	simple_unlock(&map->hint_lock);

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t	tmp_entry;

		/*
		 *	Entry was either not a valid hint, or the vaddr
		 *	was not contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 *	Handle submaps.
	 */

	if (entry->is_sub_map) {
		vm_map_t	old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 *	Check whether this task is allowed to have
	 *	this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type)
		RETURN(KERN_PROTECTION_FAILURE);

	/*
	 *	If this page is not pageable, we have to get
	 *	it for all possible accesses.
	 */

	if (*wired = (entry->wired_count != 0))
		prot = fault_type = entry->protection;

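	/*
	 * Editorial note: the assignment inside the condition above is
	 * intentional -- *wired is set as a side effect.  For a wired
	 * entry the fault must be resolved for all possible accesses at
	 * once, so prot and fault_type are forced up to the entry's
	 * full protection.
	 */
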
	/*
	 *	If we don't already have a VM object, track
	 *	it down.
	 */

	if (su = !entry->is_a_map) {
		share_map = map;
		share_offset = vaddr;
	}
	else {
		vm_map_entry_t	share_entry;

		/*
		 *	Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 *	Look for the backing store object and offset
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
					&share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 *	If the entry was copy-on-write, we either shadow
	 *	it now (for a write fault) or demote the permitted
	 *	access (for a read fault).
	 */

	if (entry->needs_copy) {
		/*
		 *	If we want to write the page, we may as well
		 *	handle that now since we've got the sharing
		 *	map locked.
		 *
		 *	If we don't need to write the page, we just
		 *	demote the permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 *	Make a new object, and place it in the
			 *	object chain.  Note that no new references
			 *	have appeared -- one just moved from the
			 *	share map to the new object.
			 */

			if (lock_read_to_write(&share_map->lock)) {
				if (share_map != map)
					vm_map_unlock_read(map);
				goto RetryLookup;
			}

			vm_object_shadow(
				&entry->object.vm_object,
				&entry->offset,
				(vm_size_t) (entry->end - entry->start));

			entry->needs_copy = FALSE;

			lock_write_to_read(&share_map->lock);
		}
		else {
			/*
			 *	We're attempting to read a copy-on-write
			 *	page -- don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}

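	/*
	 * Editorial note: lock_read_to_write returns nonzero when the
	 * read-to-write upgrade fails; the lock has then been lost
	 * entirely, so the only safe recovery is to restart the whole
	 * lookup.  The same upgrade-or-retry pattern appears again just
	 * below when a fresh object must be allocated.
	 */
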
	/*
	 *	Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL) {

		if (lock_read_to_write(&share_map->lock)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}

		entry->object.vm_object = vm_object_allocate(
				(vm_size_t)(entry->end - entry->start));
		entry->offset = 0;
		lock_write_to_read(&share_map->lock);
	}

	/*
	 *	Return the object/offset from this entry.  If the entry
	 *	was copy-on-write or empty, it has been fixed up.
	 */

	*offset = (share_offset - entry->start) + entry->offset;
	*object = entry->object.vm_object;

	/*
	 *	Return whether this is the only map sharing this data.
	 */

	if (!su) {
		simple_lock(&share_map->ref_lock);
		su = (share_map->ref_count == 1);
		simple_unlock(&share_map->ref_lock);
	}

	*out_prot = prot;
	*single_use = su;

	return(KERN_SUCCESS);

#undef	RETURN
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */

void vm_map_lookup_done(map, entry)
	register vm_map_t	map;
	vm_map_entry_t		entry;
{
	/*
	 *	If this entry references a map, unlock it first.
	 */

	if (entry->is_a_map)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 *	Unlock the main-level map
	 */

	vm_map_unlock_read(map);
}

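/*
 * Editorial sketch (hypothetical fault handler; in this system the
 * real consumer of this pair is the vm_fault code):
 *
 *	vm_map_t	map = ...;	-- in/out argument
 *	vm_map_entry_t	entry;
 *	vm_object_t	object;
 *	vm_offset_t	offset;
 *	vm_prot_t	prot;
 *	boolean_t	wired, su;
 *
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *	    &offset, &prot, &wired, &su) != KERN_SUCCESS)
 *		return(KERN_INVALID_ADDRESS);
 *	... use object/offset while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry);
 *
 * Note that vm_map_lookup may rewrite the map pointer (for submaps),
 * and that possibly-updated map is the one vm_map_lookup_done must see.
 */
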
/*
 *	Routine:	vm_map_simplify
 *	Purpose:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void vm_map_simplify(map, start)
	vm_map_t	map;
	vm_offset_t	start;
{
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;

	vm_map_lock(map);
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((prev_entry = this_entry->prev) != &map->header) &&

		(prev_entry->end == start) &&
		(map->is_main_map) &&

		(prev_entry->is_a_map == FALSE) &&
		(prev_entry->is_sub_map == FALSE) &&

		(this_entry->is_a_map == FALSE) &&
		(this_entry->is_sub_map == FALSE) &&

		(prev_entry->inheritance == this_entry->inheritance) &&
		(prev_entry->protection == this_entry->protection) &&
		(prev_entry->max_protection == this_entry->max_protection) &&
		(prev_entry->wired_count == this_entry->wired_count) &&

		(prev_entry->copy_on_write == this_entry->copy_on_write) &&
		(prev_entry->needs_copy == this_entry->needs_copy) &&

		(prev_entry->object.vm_object == this_entry->object.vm_object) &&
		((prev_entry->offset + (prev_entry->end - prev_entry->start))
		     == this_entry->offset)
	) {
		if (map->first_free == this_entry)
			map->first_free = prev_entry;

		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->end = this_entry->end;
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
	}
	vm_map_unlock(map);
}

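/*
 * Editorial example (hypothetical values): two entries covering
 * [0x1000,0x2000) at object offset 0 and [0x2000,0x3000) at object
 * offset 0x1000, with identical attributes and the same backing
 * object, satisfy every clause of the test above and collapse into a
 * single entry [0x1000,0x3000) at offset 0.
 */
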
"Task" : "Share"), 239045748Smckusick (int) map, (int) (map->pmap), map->ref_count, map->nentries, 239145748Smckusick map->timestamp); 239245748Smckusick 239345748Smckusick if (!full && indent) 239445748Smckusick return; 239545748Smckusick 239645748Smckusick indent += 2; 239745748Smckusick for (entry = map->header.next; entry != &map->header; 239845748Smckusick entry = entry->next) { 239945748Smckusick iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", 240045748Smckusick (int) entry, (int) entry->start, (int) entry->end); 240145748Smckusick if (map->is_main_map) { 240245748Smckusick static char *inheritance_name[4] = 240345748Smckusick { "share", "copy", "none", "donate_copy"}; 240445748Smckusick printf("prot=%x/%x/%s, ", 240545748Smckusick entry->protection, 240645748Smckusick entry->max_protection, 240745748Smckusick inheritance_name[entry->inheritance]); 240845748Smckusick if (entry->wired_count != 0) 240945748Smckusick printf("wired, "); 241045748Smckusick } 241145748Smckusick 241245748Smckusick if (entry->is_a_map || entry->is_sub_map) { 241345748Smckusick printf("share=0x%x, offset=0x%x\n", 241445748Smckusick (int) entry->object.share_map, 241545748Smckusick (int) entry->offset); 241645748Smckusick if ((entry->prev == &map->header) || 241745748Smckusick (!entry->prev->is_a_map) || 241845748Smckusick (entry->prev->object.share_map != 241945748Smckusick entry->object.share_map)) { 242045748Smckusick indent += 2; 242145748Smckusick vm_map_print(entry->object.share_map, full); 242245748Smckusick indent -= 2; 242345748Smckusick } 242445748Smckusick 242545748Smckusick } 242645748Smckusick else { 242745748Smckusick printf("object=0x%x, offset=0x%x", 242845748Smckusick (int) entry->object.vm_object, 242945748Smckusick (int) entry->offset); 243045748Smckusick if (entry->copy_on_write) 243145748Smckusick printf(", copy (%s)", 243245748Smckusick entry->needs_copy ? "needed" : "done"); 243345748Smckusick printf("\n"); 243445748Smckusick 243545748Smckusick if ((entry->prev == &map->header) || 243645748Smckusick (entry->prev->is_a_map) || 243745748Smckusick (entry->prev->object.vm_object != 243845748Smckusick entry->object.vm_object)) { 243945748Smckusick indent += 2; 244045748Smckusick vm_object_print(entry->object.vm_object, full); 244145748Smckusick indent -= 2; 244245748Smckusick } 244345748Smckusick } 244445748Smckusick } 244545748Smckusick indent -= 2; 244645748Smckusick } 2447