/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remain compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
static void vmspace_dtor(void *obj, void *privdata);
static void vmspace_terminate(struct vmspace *vm, int final);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
static struct objcache *vmspace_cache;

/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define MAPENTRYBSP_CACHE	(MAXCPU+1)
#define MAPENTRYAP_CACHE	8

#define MAP_ENTRY_PARTITION_SIZE	((vm_offset_t)(16 * 1024 * 1024))
#define MAP_ENTRY_PARTITION_MASK	(MAP_ENTRY_PARTITION_SIZE - 1)

#define VM_MAP_ENTRY_WITHIN_PARTITION(entry)	\
	((((entry)->start ^ (entry)->end) & ~MAP_ENTRY_PARTITION_MASK) == 0)
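/*
 * Illustrative note (not from the original sources): with a 16MB partition
 * size, MAP_ENTRY_PARTITION_MASK is 0x00FFFFFF, so the macro above asks
 * whether 'start' and 'end' agree in every bit above bit 23.  For example,
 * an entry spanning [0x1000000, 0x1200000) XORs to 0x0200000, which masks
 * to zero (the entry lies within one partition), while an entry spanning
 * [0x0FFF000, 0x1001000) XORs to 0x1FFE000, which leaves bit 24 set, so
 * the macro evaluates false (the entry straddles a partition boundary).
 */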
static struct vm_zone mapentzone_store;
static vm_zone_t mapentzone;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];

static int randomize_mmap;
SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
    "Randomize mmap offsets");
static int vm_map_relock_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
    &vm_map_relock_enable, 0, "Enable vm_map prefault relock optimization");

static void vmspace_drop_notoken(struct vmspace *vm);
static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *countp, int flags);
static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
		vm_offset_t vaddr, int *countp);

/*
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * Map and entry structures are allocated from the general purpose
 * memory pool with some exceptions:
 *
 *	- The kernel map is allocated statically.
 *	- Initial kernel map entries are allocated out of a static pool.
 *	- We must set ZONE_SPECIAL here or the early boot code can get
 *	  stuck if there are >63 cores.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 *
 * Called from the low level boot code only.
 */
void
vm_map_startup(void)
{
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		  map_entry_init, MAX_MAPENT);
	mapentzone_store.zflags |= ZONE_SPECIAL;
}

/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
						sizeof(struct vmspace),
						0, ncpus * 4,
						vmspace_ctor, vmspace_dtor,
						NULL);
	zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
	pmap_init2();
	vm_object_init2();
}

/*
 * objcache support.  We leave the pmap root cached as long as possible
 * for performance reasons.
 */
static
boolean_t
vmspace_ctor(void *obj, void *privdata, int ocflags)
{
	struct vmspace *vm = obj;

	bzero(vm, sizeof(*vm));
	vm->vm_refcnt = VM_REF_DELETED;

	return 1;
}

static
void
vmspace_dtor(void *obj, void *privdata)
{
	struct vmspace *vm = obj;

	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	pmap_puninit(vmspace_pmap(vm));
}
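/*
 * Illustrative sketch (not part of the original file): the ctor only runs
 * when the objcache has to manufacture a brand new vmspace, so a structure
 * obtained later may instead be a cached one whose pmap root survives from
 * a previous life.  Roughly:
 *
 *	vm = objcache_get(vmspace_cache, M_WAITOK);
 *		// fresh object:  zeroed by vmspace_ctor(),
 *		//		  vm_refcnt == VM_REF_DELETED
 *		// reused object: pmap root retained, so vmspace_alloc()
 *		//		  only re-zeroes selected fields
 *	...
 *	objcache_put(vmspace_cache, vm);
 *		// vmspace_dtor() runs only if the cache decides to
 *		// actually free the backing structure
 */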
/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->start is the address; it is the only field that has to be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
	if (a->start < b->start)
		return(-1);
	else if (a->start > b->start)
		return(1);
	return(0);
}

/*
 * Initialize vmspace ref/hold counts (used for vmspace0 as well).  There
 * is a holdcnt for every refcnt.
 */
void
vmspace_initrefs(struct vmspace *vm)
{
	vm->vm_refcnt = 1;
	vm->vm_holdcnt = 1;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * Returns a referenced vmspace.
 *
 * No requirements.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = objcache_get(vmspace_cache, M_WAITOK);

	bzero(&vm->vm_startcopy,
	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */

	/*
	 * NOTE: hold to acquire token for safety.
	 *
	 * On return vmspace is referenced (refs=1, hold=1).  That is,
	 * each refcnt also has a holdcnt.  There can be additional holds
	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
	 * two stages, one on refs 1->0, and the second on hold 1->0.
	 */
	KKASSERT(vm->vm_holdcnt == 0);
	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	vmspace_initrefs(vm);
	vmspace_hold(vm);
	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
	vm->vm_shm = NULL;
	vm->vm_flags = 0;
	cpu_vmspace_alloc(vm);
	vmspace_drop(vm);

	return (vm);
}

/*
 * NOTE: Can return -1 if the vmspace is exiting.
 */
int
vmspace_getrefs(struct vmspace *vm)
{
	int32_t n;

	n = vm->vm_refcnt;
	cpu_ccfence();
	if (n & VM_REF_DELETED)
		n = -1;
	return n;
}

void
vmspace_hold(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	lwkt_gettoken(&vm->vm_map.token);
}

/*
 * Drop with final termination interlock.
 */
void
vmspace_drop(struct vmspace *vm)
{
	lwkt_reltoken(&vm->vm_map.token);
	vmspace_drop_notoken(vm);
}

static void
vmspace_drop_notoken(struct vmspace *vm)
{
	if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
		if (vm->vm_refcnt & VM_REF_DELETED)
			vmspace_terminate(vm, 1);
	}
}
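/*
 * Illustrative usage sketch (not part of the original file): a foreign
 * accessor that merely needs the vmspace to stay intact while it looks at
 * it brackets the access with hold/drop, which also acquires and releases
 * the map token:
 *
 *	vmspace_hold(vm);
 *	... inspect vm->vm_map safely ...
 *	vmspace_drop(vm);
 *
 * Only code that actually shares the address space takes a real reference
 * with vmspace_ref()/vmspace_rel() (see below).
 */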
/*
 * A vmspace object must not be in a terminated state to be able to obtain
 * additional refs on it.
 *
 * These are official references to the vmspace, the count is used to check
 * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
 *
 * XXX we need to combine hold & ref together into one 64-bit field to allow
 * holds to prevent stage-1 termination.
 */
void
vmspace_ref(struct vmspace *vm)
{
	uint32_t n;

	atomic_add_int(&vm->vm_holdcnt, 1);
	n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
	KKASSERT((n & VM_REF_DELETED) == 0);
}

/*
 * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
 * termination of the vmspace.  Then, on the final drop of the hold we
 * will do stage-2 final termination.
 */
void
vmspace_rel(struct vmspace *vm)
{
	uint32_t n;

	/*
	 * Drop refs.  Each ref also has a hold which is also dropped.
	 *
	 * When refs hits 0 compete to get the VM_REF_DELETED flag (holds
	 * prevent finalization) to start termination processing.
	 * Finalization occurs when the last hold count drops to 0.
	 */
	n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
	while (n == 0) {
		if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
			vmspace_terminate(vm, 0);
			break;
		}
		n = vm->vm_refcnt;
		cpu_ccfence();
	}
	vmspace_drop_notoken(vm);
}

/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been reaped.
 *
 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
 * to prevent stage-2 until the process is reaped.  Note the order of
 * operation, we must hold first.
 *
 * No requirements.
 */
void
vmspace_relexit(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	vmspace_rel(vm);
}

/*
 * Called during reap to disconnect the remainder of the vmspace from
 * the process.  On the hold drop the vmspace termination is finalized.
 *
 * No requirements.
 */
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	vmspace_drop_notoken(vm);
}
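/*
 * Illustrative lifecycle sketch (not part of the original file), tying the
 * two termination stages to exit and reap:
 *
 *	vm = vmspace_alloc(min, max);	// refs=1, hold=1
 *	vmspace_ref(vm);		// e.g. another sharer of the space
 *	...
 *	vmspace_rel(vm);		// sharer goes away
 *	vmspace_relexit(vm);		// process exit: refs 1->0, stage-1
 *					// termination runs, the extra hold
 *					// keeps stage-2 pending
 *	vmspace_exitfree(p);		// process reap: last hold drops,
 *					// stage-2 termination frees the rest
 */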
/*
 * Called in two cases:
 *
 * (1) When the last refcnt is dropped and the vmspace becomes inactive,
 *     called with final == 0.  refcnt will have VM_REF_DELETED set at this
 *     point, and holdcnt will still be non-zero.
 *
 * (2) When holdcnt becomes 0, called with final == 1.  There should no
 *     longer be anyone with access to the vmspace.
 *
 * VMSPACE_EXIT1 flags the primary deactivation
 * VMSPACE_EXIT2 flags the last reap
 */
static void
vmspace_terminate(struct vmspace *vm, int final)
{
	int count;

	lwkt_gettoken(&vm->vm_map.token);
	if (final == 0) {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
		vm->vm_flags |= VMSPACE_EXIT1;

		/*
		 * Get rid of most of the resources.  Leave the kernel pmap
		 * intact.
		 *
		 * If the pmap does not contain wired pages we can bulk-delete
		 * the pmap as a performance optimization before removing the
		 * related mappings.
		 *
		 * If the pmap contains wired pages we cannot do this
		 * pre-optimization because currently vm_fault_unwire()
		 * expects the pmap pages to exist and will not decrement
		 * p->wire_count if they do not.
		 */
		shmexit(vm);
		if (vmspace_pmap(vm)->pm_stats.wired_count) {
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
		} else {
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
		}
		lwkt_reltoken(&vm->vm_map.token);
	} else {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);

		/*
		 * Get rid of remaining basic resources.
		 */
		vm->vm_flags |= VMSPACE_EXIT2;
		shmexit(vm);

		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vm->vm_map);
		cpu_vmspace_free(vm);

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
			      vm->vm_map.max_offset, &count);
		vm_map_unlock(&vm->vm_map);
		vm_map_entry_release(count);

		pmap_release(vmspace_pmap(vm));
		lwkt_reltoken(&vm->vm_map.token);
		objcache_put(vmspace_cache, vm);
	}
}

/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_swap_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;
	vm_offset_t n;

	vmspace_hold(vm);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->swblock_count) {
				n = (cur->end - cur->start) / PAGE_SIZE;
				count += object->swblock_count *
				    SWAP_META_PAGES * n / object->size + 1;
			}
			break;
		default:
			break;
		}
	}
	vmspace_drop(vm);

	return(count);
}
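/*
 * Worked example (illustrative numbers, not from the original file): if a
 * backing object spans object->size = 1024 pages and its swblock_count *
 * SWAP_META_PAGES works out to 256 swapped pages, then a map entry covering
 * n = 512 of those pages is charged 256 * 512 / 1024 + 1 = 129 pages, i.e.
 * its proportional share of the object's swap use plus one page to absorb
 * the rounding loss.
 */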
/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_anonymous_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;

	vmspace_hold(vm);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			count += object->resident_page_count;
			break;
		default:
			break;
		}
	}
	vmspace_drop(vm);

	return(count);
}

/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure.  The pmap is initialized elsewhere.
 *
 * No requirements.
596 */ 597 void 598 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap) 599 { 600 map->header.next = map->header.prev = &map->header; 601 RB_INIT(&map->rb_root); 602 spin_init(&map->ilock_spin, "ilock"); 603 map->ilock_base = NULL; 604 map->nentries = 0; 605 map->size = 0; 606 map->system_map = 0; 607 map->min_offset = min; 608 map->max_offset = max; 609 map->pmap = pmap; 610 map->timestamp = 0; 611 map->flags = 0; 612 bzero(&map->freehint, sizeof(map->freehint)); 613 lwkt_token_init(&map->token, "vm_map"); 614 lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0); 615 } 616 617 /* 618 * Find the first possible free address for the specified request length. 619 * Returns 0 if we don't have one cached. 620 */ 621 static 622 vm_offset_t 623 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align) 624 { 625 vm_map_freehint_t *scan; 626 627 scan = &map->freehint[0]; 628 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 629 if (scan->length == length && scan->align == align) 630 return(scan->start); 631 ++scan; 632 } 633 return 0; 634 } 635 636 /* 637 * Unconditionally set the freehint. Called by vm_map_findspace() after 638 * it finds an address. This will help us iterate optimally on the next 639 * similar findspace. 640 */ 641 static 642 void 643 vm_map_freehint_update(vm_map_t map, vm_offset_t start, 644 vm_size_t length, vm_size_t align) 645 { 646 vm_map_freehint_t *scan; 647 648 scan = &map->freehint[0]; 649 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 650 if (scan->length == length && scan->align == align) { 651 scan->start = start; 652 return; 653 } 654 ++scan; 655 } 656 scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK]; 657 scan->start = start; 658 scan->align = align; 659 scan->length = length; 660 ++map->freehint_newindex; 661 } 662 663 /* 664 * Update any existing freehints (for any alignment), for the hole we just 665 * added. 666 */ 667 static 668 void 669 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length) 670 { 671 vm_map_freehint_t *scan; 672 673 scan = &map->freehint[0]; 674 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 675 if (scan->length <= length && scan->start > start) 676 scan->start = start; 677 ++scan; 678 } 679 } 680 681 /* 682 * Shadow the vm_map_entry's object. This typically needs to be done when 683 * a write fault is taken on an entry which had previously been cloned by 684 * fork(). The shared object (which might be NULL) must become private so 685 * we add a shadow layer above it. 686 * 687 * Object allocation for anonymous mappings is defered as long as possible. 688 * When creating a shadow, however, the underlying object must be instantiated 689 * so it can be shared. 690 * 691 * If the map segment is governed by a virtual page table then it is 692 * possible to address offsets beyond the mapped area. Just allocate 693 * a maximally sized object for this case. 694 * 695 * If addref is non-zero an additional reference is added to the returned 696 * entry. This mechanic exists because the additional reference might have 697 * to be added atomically and not after return to prevent a premature 698 * collapse. 699 * 700 * The vm_map must be exclusively locked. 701 * No other requirements. 
702 */ 703 static 704 void 705 vm_map_entry_shadow(vm_map_entry_t entry, int addref) 706 { 707 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 708 vm_object_shadow(&entry->object.vm_object, &entry->offset, 709 0x7FFFFFFF, addref); /* XXX */ 710 } else { 711 vm_object_shadow(&entry->object.vm_object, &entry->offset, 712 atop(entry->end - entry->start), addref); 713 } 714 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 715 } 716 717 /* 718 * Allocate an object for a vm_map_entry. 719 * 720 * Object allocation for anonymous mappings is defered as long as possible. 721 * This function is called when we can defer no longer, generally when a map 722 * entry might be split or forked or takes a page fault. 723 * 724 * If the map segment is governed by a virtual page table then it is 725 * possible to address offsets beyond the mapped area. Just allocate 726 * a maximally sized object for this case. 727 * 728 * The vm_map must be exclusively locked. 729 * No other requirements. 730 */ 731 void 732 vm_map_entry_allocate_object(vm_map_entry_t entry) 733 { 734 vm_object_t obj; 735 736 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 737 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */ 738 } else { 739 obj = vm_object_allocate(OBJT_DEFAULT, 740 atop(entry->end - entry->start)); 741 } 742 entry->object.vm_object = obj; 743 entry->offset = 0; 744 } 745 746 /* 747 * Set an initial negative count so the first attempt to reserve 748 * space preloads a bunch of vm_map_entry's for this cpu. Also 749 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to 750 * map a new page for vm_map_entry structures. SMP systems are 751 * particularly sensitive. 752 * 753 * This routine is called in early boot so we cannot just call 754 * vm_map_entry_reserve(). 755 * 756 * Called from the low level boot code only (for each cpu) 757 * 758 * WARNING! Take care not to have too-big a static/BSS structure here 759 * as MAXCPU can be 256+, otherwise the loader's 64MB heap 760 * can get blown out by the kernel plus the initrd image. 761 */ 762 void 763 vm_map_entry_reserve_cpu_init(globaldata_t gd) 764 { 765 vm_map_entry_t entry; 766 int count; 767 int i; 768 769 atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2); 770 if (gd->gd_cpuid == 0) { 771 entry = &cpu_map_entry_init_bsp[0]; 772 count = MAPENTRYBSP_CACHE; 773 } else { 774 entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0]; 775 count = MAPENTRYAP_CACHE; 776 } 777 for (i = 0; i < count; ++i, ++entry) { 778 entry->next = gd->gd_vme_base; 779 gd->gd_vme_base = entry; 780 } 781 } 782 783 /* 784 * Reserves vm_map_entry structures so code later-on can manipulate 785 * map_entry structures within a locked map without blocking trying 786 * to allocate a new vm_map_entry. 787 * 788 * No requirements. 789 * 790 * WARNING! We must not decrement gd_vme_avail until after we have 791 * ensured that sufficient entries exist, otherwise we can 792 * get into an endless call recursion in the zalloc code 793 * itself. 794 */ 795 int 796 vm_map_entry_reserve(int count) 797 { 798 struct globaldata *gd = mycpu; 799 vm_map_entry_t entry; 800 801 /* 802 * Make sure we have enough structures in gd_vme_base to handle 803 * the reservation request. 804 * 805 * Use a critical section to protect against VM faults. It might 806 * not be needed, but we have to be careful here. 
807 */ 808 if (gd->gd_vme_avail < count) { 809 crit_enter(); 810 while (gd->gd_vme_avail < count) { 811 entry = zalloc(mapentzone); 812 entry->next = gd->gd_vme_base; 813 gd->gd_vme_base = entry; 814 atomic_add_int(&gd->gd_vme_avail, 1); 815 } 816 crit_exit(); 817 } 818 atomic_add_int(&gd->gd_vme_avail, -count); 819 820 return(count); 821 } 822 823 /* 824 * Releases previously reserved vm_map_entry structures that were not 825 * used. If we have too much junk in our per-cpu cache clean some of 826 * it out. 827 * 828 * No requirements. 829 */ 830 void 831 vm_map_entry_release(int count) 832 { 833 struct globaldata *gd = mycpu; 834 vm_map_entry_t entry; 835 vm_map_entry_t efree; 836 837 count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count; 838 if (gd->gd_vme_avail > MAP_RESERVE_SLOP) { 839 efree = NULL; 840 crit_enter(); 841 while (gd->gd_vme_avail > MAP_RESERVE_HYST) { 842 entry = gd->gd_vme_base; 843 KKASSERT(entry != NULL); 844 gd->gd_vme_base = entry->next; 845 atomic_add_int(&gd->gd_vme_avail, -1); 846 entry->next = efree; 847 efree = entry; 848 } 849 crit_exit(); 850 while ((entry = efree) != NULL) { 851 efree = efree->next; 852 zfree(mapentzone, entry); 853 } 854 } 855 } 856 857 /* 858 * Reserve map entry structures for use in kernel_map itself. These 859 * entries have *ALREADY* been reserved on a per-cpu basis when the map 860 * was inited. This function is used by zalloc() to avoid a recursion 861 * when zalloc() itself needs to allocate additional kernel memory. 862 * 863 * This function works like the normal reserve but does not load the 864 * vm_map_entry cache (because that would result in an infinite 865 * recursion). Note that gd_vme_avail may go negative. This is expected. 866 * 867 * Any caller of this function must be sure to renormalize after 868 * potentially eating entries to ensure that the reserve supply 869 * remains intact. 870 * 871 * No requirements. 872 */ 873 int 874 vm_map_entry_kreserve(int count) 875 { 876 struct globaldata *gd = mycpu; 877 878 atomic_add_int(&gd->gd_vme_avail, -count); 879 KASSERT(gd->gd_vme_base != NULL, 880 ("no reserved entries left, gd_vme_avail = %d", 881 gd->gd_vme_avail)); 882 return(count); 883 } 884 885 /* 886 * Release previously reserved map entries for kernel_map. We do not 887 * attempt to clean up like the normal release function as this would 888 * cause an unnecessary (but probably not fatal) deep procedure call. 889 * 890 * No requirements. 891 */ 892 void 893 vm_map_entry_krelease(int count) 894 { 895 struct globaldata *gd = mycpu; 896 897 atomic_add_int(&gd->gd_vme_avail, count); 898 } 899 900 /* 901 * Allocates a VM map entry for insertion. No entry fields are filled in. 902 * 903 * The entries should have previously been reserved. The reservation count 904 * is tracked in (*countp). 905 * 906 * No requirements. 907 */ 908 static vm_map_entry_t 909 vm_map_entry_create(vm_map_t map, int *countp) 910 { 911 struct globaldata *gd = mycpu; 912 vm_map_entry_t entry; 913 914 KKASSERT(*countp > 0); 915 --*countp; 916 crit_enter(); 917 entry = gd->gd_vme_base; 918 KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp)); 919 gd->gd_vme_base = entry->next; 920 crit_exit(); 921 922 return(entry); 923 } 924 925 /* 926 * Dispose of a vm_map_entry that is no longer being referenced. 927 * 928 * No requirements. 
929 */ 930 static void 931 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp) 932 { 933 struct globaldata *gd = mycpu; 934 935 ++*countp; 936 crit_enter(); 937 entry->next = gd->gd_vme_base; 938 gd->gd_vme_base = entry; 939 crit_exit(); 940 } 941 942 943 /* 944 * Insert/remove entries from maps. 945 * 946 * The related map must be exclusively locked. 947 * The caller must hold map->token 948 * No other requirements. 949 */ 950 static __inline void 951 vm_map_entry_link(vm_map_t map, 952 vm_map_entry_t after_where, 953 vm_map_entry_t entry) 954 { 955 ASSERT_VM_MAP_LOCKED(map); 956 957 map->nentries++; 958 entry->prev = after_where; 959 entry->next = after_where->next; 960 entry->next->prev = entry; 961 after_where->next = entry; 962 if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry)) 963 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry); 964 } 965 966 static __inline void 967 vm_map_entry_unlink(vm_map_t map, 968 vm_map_entry_t entry) 969 { 970 vm_map_entry_t prev; 971 vm_map_entry_t next; 972 973 ASSERT_VM_MAP_LOCKED(map); 974 975 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 976 panic("vm_map_entry_unlink: attempt to mess with " 977 "locked entry! %p", entry); 978 } 979 prev = entry->prev; 980 next = entry->next; 981 next->prev = prev; 982 prev->next = next; 983 vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry); 984 map->nentries--; 985 } 986 987 /* 988 * Finds the map entry containing (or immediately preceding) the specified 989 * address in the given map. The entry is returned in (*entry). 990 * 991 * The boolean result indicates whether the address is actually contained 992 * in the map. 993 * 994 * The related map must be locked. 995 * No other requirements. 996 */ 997 boolean_t 998 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) 999 { 1000 vm_map_entry_t tmp; 1001 vm_map_entry_t last; 1002 1003 ASSERT_VM_MAP_LOCKED(map); 1004 1005 /* 1006 * Locate the record from the top of the tree. 'last' tracks the 1007 * closest prior record and is returned if no match is found, which 1008 * in binary tree terms means tracking the most recent right-branch 1009 * taken. If there is no prior record, &map->header is returned. 1010 */ 1011 last = &map->header; 1012 tmp = RB_ROOT(&map->rb_root); 1013 1014 while (tmp) { 1015 if (address >= tmp->start) { 1016 if (address < tmp->end) { 1017 *entry = tmp; 1018 return(TRUE); 1019 } 1020 last = tmp; 1021 tmp = RB_RIGHT(tmp, rb_entry); 1022 } else { 1023 tmp = RB_LEFT(tmp, rb_entry); 1024 } 1025 } 1026 *entry = last; 1027 return (FALSE); 1028 } 1029 1030 /* 1031 * Inserts the given whole VM object into the target map at the specified 1032 * address range. The object's size should match that of the address range. 1033 * 1034 * The map must be exclusively locked. 1035 * The object must be held. 1036 * The caller must have reserved sufficient vm_map_entry structures. 1037 * 1038 * If object is non-NULL, ref count must be bumped by caller prior to 1039 * making call to account for the new entry. 
1040 */ 1041 int 1042 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux, 1043 vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, 1044 vm_maptype_t maptype, vm_subsys_t id, 1045 vm_prot_t prot, vm_prot_t max, int cow) 1046 { 1047 vm_map_entry_t new_entry; 1048 vm_map_entry_t prev_entry; 1049 vm_map_entry_t temp_entry; 1050 vm_eflags_t protoeflags; 1051 int must_drop = 0; 1052 vm_object_t object; 1053 1054 if (maptype == VM_MAPTYPE_UKSMAP) 1055 object = NULL; 1056 else 1057 object = map_object; 1058 1059 ASSERT_VM_MAP_LOCKED(map); 1060 if (object) 1061 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1062 1063 /* 1064 * Check that the start and end points are not bogus. 1065 */ 1066 if ((start < map->min_offset) || (end > map->max_offset) || 1067 (start >= end)) 1068 return (KERN_INVALID_ADDRESS); 1069 1070 /* 1071 * Find the entry prior to the proposed starting address; if it's part 1072 * of an existing entry, this range is bogus. 1073 */ 1074 if (vm_map_lookup_entry(map, start, &temp_entry)) 1075 return (KERN_NO_SPACE); 1076 1077 prev_entry = temp_entry; 1078 1079 /* 1080 * Assert that the next entry doesn't overlap the end point. 1081 */ 1082 1083 if ((prev_entry->next != &map->header) && 1084 (prev_entry->next->start < end)) 1085 return (KERN_NO_SPACE); 1086 1087 protoeflags = 0; 1088 1089 if (cow & MAP_COPY_ON_WRITE) 1090 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 1091 1092 if (cow & MAP_NOFAULT) { 1093 protoeflags |= MAP_ENTRY_NOFAULT; 1094 1095 KASSERT(object == NULL, 1096 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1097 } 1098 if (cow & MAP_DISABLE_SYNCER) 1099 protoeflags |= MAP_ENTRY_NOSYNC; 1100 if (cow & MAP_DISABLE_COREDUMP) 1101 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1102 if (cow & MAP_IS_STACK) 1103 protoeflags |= MAP_ENTRY_STACK; 1104 if (cow & MAP_IS_KSTACK) 1105 protoeflags |= MAP_ENTRY_KSTACK; 1106 1107 lwkt_gettoken(&map->token); 1108 1109 if (object) { 1110 /* 1111 * When object is non-NULL, it could be shared with another 1112 * process. We have to set or clear OBJ_ONEMAPPING 1113 * appropriately. 1114 * 1115 * NOTE: This flag is only applicable to DEFAULT and SWAP 1116 * objects and will already be clear in other types 1117 * of objects, so a shared object lock is ok for 1118 * VNODE objects. 1119 */ 1120 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 1121 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1122 } 1123 } 1124 else if ((prev_entry != &map->header) && 1125 (prev_entry->eflags == protoeflags) && 1126 (prev_entry->end == start) && 1127 (prev_entry->wired_count == 0) && 1128 (prev_entry->id == id) && 1129 prev_entry->maptype == maptype && 1130 maptype == VM_MAPTYPE_NORMAL && 1131 ((prev_entry->object.vm_object == NULL) || 1132 vm_object_coalesce(prev_entry->object.vm_object, 1133 OFF_TO_IDX(prev_entry->offset), 1134 (vm_size_t)(prev_entry->end - prev_entry->start), 1135 (vm_size_t)(end - prev_entry->end)))) { 1136 /* 1137 * We were able to extend the object. Determine if we 1138 * can extend the previous map entry to include the 1139 * new range as well. 
1140 */ 1141 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 1142 (prev_entry->protection == prot) && 1143 (prev_entry->max_protection == max)) { 1144 map->size += (end - prev_entry->end); 1145 prev_entry->end = end; 1146 vm_map_simplify_entry(map, prev_entry, countp); 1147 lwkt_reltoken(&map->token); 1148 return (KERN_SUCCESS); 1149 } 1150 1151 /* 1152 * If we can extend the object but cannot extend the 1153 * map entry, we have to create a new map entry. We 1154 * must bump the ref count on the extended object to 1155 * account for it. object may be NULL. 1156 * 1157 * XXX if object is NULL should we set offset to 0 here ? 1158 */ 1159 object = prev_entry->object.vm_object; 1160 offset = prev_entry->offset + 1161 (prev_entry->end - prev_entry->start); 1162 if (object) { 1163 vm_object_hold(object); 1164 vm_object_chain_wait(object, 0); 1165 vm_object_reference_locked(object); 1166 must_drop = 1; 1167 map_object = object; 1168 } 1169 } 1170 1171 /* 1172 * NOTE: if conditionals fail, object can be NULL here. This occurs 1173 * in things like the buffer map where we manage kva but do not manage 1174 * backing objects. 1175 */ 1176 1177 /* 1178 * Create a new entry 1179 */ 1180 1181 new_entry = vm_map_entry_create(map, countp); 1182 new_entry->start = start; 1183 new_entry->end = end; 1184 new_entry->id = id; 1185 1186 new_entry->maptype = maptype; 1187 new_entry->eflags = protoeflags; 1188 new_entry->object.map_object = map_object; 1189 new_entry->aux.master_pde = 0; /* in case size is different */ 1190 new_entry->aux.map_aux = map_aux; 1191 new_entry->offset = offset; 1192 1193 new_entry->inheritance = VM_INHERIT_DEFAULT; 1194 new_entry->protection = prot; 1195 new_entry->max_protection = max; 1196 new_entry->wired_count = 0; 1197 1198 /* 1199 * Insert the new entry into the list 1200 */ 1201 1202 vm_map_entry_link(map, prev_entry, new_entry); 1203 map->size += new_entry->end - new_entry->start; 1204 1205 /* 1206 * Don't worry about updating freehint[] when inserting, allow 1207 * addresses to be lower than the actual first free spot. 1208 */ 1209 #if 0 1210 /* 1211 * Temporarily removed to avoid MAP_STACK panic, due to 1212 * MAP_STACK being a huge hack. Will be added back in 1213 * when MAP_STACK (and the user stack mapping) is fixed. 1214 */ 1215 /* 1216 * It may be possible to simplify the entry 1217 */ 1218 vm_map_simplify_entry(map, new_entry, countp); 1219 #endif 1220 1221 /* 1222 * Try to pre-populate the page table. Mappings governed by virtual 1223 * page tables cannot be prepopulated without a lot of work, so 1224 * don't try. 1225 */ 1226 if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) && 1227 maptype != VM_MAPTYPE_VPAGETABLE && 1228 maptype != VM_MAPTYPE_UKSMAP) { 1229 int dorelock = 0; 1230 if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) { 1231 dorelock = 1; 1232 vm_object_lock_swap(); 1233 vm_object_drop(object); 1234 } 1235 pmap_object_init_pt(map->pmap, start, prot, 1236 object, OFF_TO_IDX(offset), end - start, 1237 cow & MAP_PREFAULT_PARTIAL); 1238 if (dorelock) { 1239 vm_object_hold(object); 1240 vm_object_lock_swap(); 1241 } 1242 } 1243 if (must_drop) 1244 vm_object_drop(object); 1245 1246 lwkt_reltoken(&map->token); 1247 return (KERN_SUCCESS); 1248 } 1249 1250 /* 1251 * Find sufficient space for `length' bytes in the given map, starting at 1252 * `start'. Returns 0 on success, 1 on no space. 1253 * 1254 * This function will returned an arbitrarily aligned pointer. If no 1255 * particular alignment is required you should pass align as 1. 
Note that 1256 * the map may return PAGE_SIZE aligned pointers if all the lengths used in 1257 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align 1258 * argument. 1259 * 1260 * 'align' should be a power of 2 but is not required to be. 1261 * 1262 * The map must be exclusively locked. 1263 * No other requirements. 1264 */ 1265 int 1266 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1267 vm_size_t align, int flags, vm_offset_t *addr) 1268 { 1269 vm_map_entry_t entry, next; 1270 vm_map_entry_t tmp; 1271 vm_offset_t hole_start; 1272 vm_offset_t end; 1273 vm_offset_t align_mask; 1274 1275 if (start < map->min_offset) 1276 start = map->min_offset; 1277 if (start > map->max_offset) 1278 return (1); 1279 1280 /* 1281 * If the alignment is not a power of 2 we will have to use 1282 * a mod/division, set align_mask to a special value. 1283 */ 1284 if ((align | (align - 1)) + 1 != (align << 1)) 1285 align_mask = (vm_offset_t)-1; 1286 else 1287 align_mask = align - 1; 1288 1289 /* 1290 * Use freehint to adjust the start point, hopefully reducing 1291 * the iteration to O(1). 1292 */ 1293 hole_start = vm_map_freehint_find(map, length, align); 1294 if (start < hole_start) 1295 start = hole_start; 1296 if (vm_map_lookup_entry(map, start, &tmp)) 1297 start = tmp->end; 1298 entry = tmp; 1299 1300 /* 1301 * Look through the rest of the map, trying to fit a new region in the 1302 * gap between existing regions, or after the very last region. 1303 */ 1304 for (;; start = (entry = next)->end) { 1305 /* 1306 * Adjust the proposed start by the requested alignment, 1307 * be sure that we didn't wrap the address. 1308 */ 1309 if (align_mask == (vm_offset_t)-1) 1310 end = roundup(start, align); 1311 else 1312 end = (start + align_mask) & ~align_mask; 1313 if (end < start) 1314 return (1); 1315 start = end; 1316 1317 /* 1318 * Find the end of the proposed new region. Be sure we didn't 1319 * go beyond the end of the map, or wrap around the address. 1320 * Then check to see if this is the last entry or if the 1321 * proposed end fits in the gap between this and the next 1322 * entry. 1323 */ 1324 end = start + length; 1325 if (end > map->max_offset || end < start) 1326 return (1); 1327 next = entry->next; 1328 1329 /* 1330 * If the next entry's start address is beyond the desired 1331 * end address we may have found a good entry. 1332 * 1333 * If the next entry is a stack mapping we do not map into 1334 * the stack's reserved space. 1335 * 1336 * XXX continue to allow mapping into the stack's reserved 1337 * space if doing a MAP_STACK mapping inside a MAP_STACK 1338 * mapping, for backwards compatibility. But the caller 1339 * really should use MAP_STACK | MAP_TRYFIXED if they 1340 * want to do that. 1341 */ 1342 if (next == &map->header) 1343 break; 1344 if (next->start >= end) { 1345 if ((next->eflags & MAP_ENTRY_STACK) == 0) 1346 break; 1347 if (flags & MAP_STACK) 1348 break; 1349 if (next->start - next->aux.avail_ssize >= end) 1350 break; 1351 } 1352 } 1353 1354 /* 1355 * Update the freehint 1356 */ 1357 vm_map_freehint_update(map, start, length, align); 1358 1359 /* 1360 * Grow the kernel_map if necessary. pmap_growkernel() will panic 1361 * if it fails. The kernel_map is locked and nothing can steal 1362 * our address space if pmap_growkernel() blocks. 1363 * 1364 * NOTE: This may be unconditionally called for kldload areas on 1365 * x86_64 because these do not bump kernel_vm_end (which would 1366 * fill 128G worth of page tables!). Therefore we must not 1367 * retry. 
1368 */ 1369 if (map == &kernel_map) { 1370 vm_offset_t kstop; 1371 1372 kstop = round_page(start + length); 1373 if (kstop > kernel_vm_end) 1374 pmap_growkernel(start, kstop); 1375 } 1376 *addr = start; 1377 return (0); 1378 } 1379 1380 /* 1381 * vm_map_find finds an unallocated region in the target address map with 1382 * the given length and allocates it. The search is defined to be first-fit 1383 * from the specified address; the region found is returned in the same 1384 * parameter. 1385 * 1386 * If object is non-NULL, ref count must be bumped by caller 1387 * prior to making call to account for the new entry. 1388 * 1389 * No requirements. This function will lock the map temporarily. 1390 */ 1391 int 1392 vm_map_find(vm_map_t map, void *map_object, void *map_aux, 1393 vm_ooffset_t offset, vm_offset_t *addr, 1394 vm_size_t length, vm_size_t align, boolean_t fitit, 1395 vm_maptype_t maptype, vm_subsys_t id, 1396 vm_prot_t prot, vm_prot_t max, int cow) 1397 { 1398 vm_offset_t start; 1399 vm_object_t object; 1400 int result; 1401 int count; 1402 1403 if (maptype == VM_MAPTYPE_UKSMAP) 1404 object = NULL; 1405 else 1406 object = map_object; 1407 1408 start = *addr; 1409 1410 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1411 vm_map_lock(map); 1412 if (object) 1413 vm_object_hold_shared(object); 1414 if (fitit) { 1415 if (vm_map_findspace(map, start, length, align, 0, addr)) { 1416 if (object) 1417 vm_object_drop(object); 1418 vm_map_unlock(map); 1419 vm_map_entry_release(count); 1420 return (KERN_NO_SPACE); 1421 } 1422 start = *addr; 1423 } 1424 result = vm_map_insert(map, &count, map_object, map_aux, 1425 offset, start, start + length, 1426 maptype, id, prot, max, cow); 1427 if (object) 1428 vm_object_drop(object); 1429 vm_map_unlock(map); 1430 vm_map_entry_release(count); 1431 1432 return (result); 1433 } 1434 1435 /* 1436 * Simplify the given map entry by merging with either neighbor. This 1437 * routine also has the ability to merge with both neighbors. 1438 * 1439 * This routine guarentees that the passed entry remains valid (though 1440 * possibly extended). When merging, this routine may delete one or 1441 * both neighbors. No action is taken on entries which have their 1442 * in-transition flag set. 1443 * 1444 * The map must be exclusively locked. 
1445 */ 1446 void 1447 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp) 1448 { 1449 vm_map_entry_t next, prev; 1450 vm_size_t prevsize, esize; 1451 1452 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1453 ++mycpu->gd_cnt.v_intrans_coll; 1454 return; 1455 } 1456 1457 if (entry->maptype == VM_MAPTYPE_SUBMAP) 1458 return; 1459 if (entry->maptype == VM_MAPTYPE_UKSMAP) 1460 return; 1461 1462 prev = entry->prev; 1463 if (prev != &map->header) { 1464 prevsize = prev->end - prev->start; 1465 if ( (prev->end == entry->start) && 1466 (prev->maptype == entry->maptype) && 1467 (prev->object.vm_object == entry->object.vm_object) && 1468 (!prev->object.vm_object || 1469 (prev->offset + prevsize == entry->offset)) && 1470 (prev->eflags == entry->eflags) && 1471 (prev->protection == entry->protection) && 1472 (prev->max_protection == entry->max_protection) && 1473 (prev->inheritance == entry->inheritance) && 1474 (prev->id == entry->id) && 1475 (prev->wired_count == entry->wired_count)) { 1476 vm_map_entry_unlink(map, prev); 1477 entry->start = prev->start; 1478 entry->offset = prev->offset; 1479 if (prev->object.vm_object) 1480 vm_object_deallocate(prev->object.vm_object); 1481 vm_map_entry_dispose(map, prev, countp); 1482 } 1483 } 1484 1485 next = entry->next; 1486 if (next != &map->header) { 1487 esize = entry->end - entry->start; 1488 if ((entry->end == next->start) && 1489 (next->maptype == entry->maptype) && 1490 (next->object.vm_object == entry->object.vm_object) && 1491 (!entry->object.vm_object || 1492 (entry->offset + esize == next->offset)) && 1493 (next->eflags == entry->eflags) && 1494 (next->protection == entry->protection) && 1495 (next->max_protection == entry->max_protection) && 1496 (next->inheritance == entry->inheritance) && 1497 (next->id == entry->id) && 1498 (next->wired_count == entry->wired_count)) { 1499 vm_map_entry_unlink(map, next); 1500 entry->end = next->end; 1501 if (next->object.vm_object) 1502 vm_object_deallocate(next->object.vm_object); 1503 vm_map_entry_dispose(map, next, countp); 1504 } 1505 } 1506 } 1507 1508 /* 1509 * Asserts that the given entry begins at or after the specified address. 1510 * If necessary, it splits the entry into two. 1511 */ 1512 #define vm_map_clip_start(map, entry, startaddr, countp) \ 1513 { \ 1514 if (startaddr > entry->start) \ 1515 _vm_map_clip_start(map, entry, startaddr, countp); \ 1516 } 1517 1518 /* 1519 * This routine is called only when it is known that the entry must be split. 1520 * 1521 * The map must be exclusively locked. 1522 */ 1523 static void 1524 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, 1525 int *countp) 1526 { 1527 vm_map_entry_t new_entry; 1528 1529 /* 1530 * Split off the front portion -- note that we must insert the new 1531 * entry BEFORE this one, so that this entry has the specified 1532 * starting address. 1533 */ 1534 1535 vm_map_simplify_entry(map, entry, countp); 1536 1537 /* 1538 * If there is no object backing this entry, we might as well create 1539 * one now. If we defer it, an object can get created after the map 1540 * is clipped, and individual objects will be created for the split-up 1541 * map. This is a bit of a hack, but is also about the best place to 1542 * put this improvement. 
1543 */ 1544 if (entry->object.vm_object == NULL && !map->system_map && 1545 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1546 vm_map_entry_allocate_object(entry); 1547 } 1548 1549 new_entry = vm_map_entry_create(map, countp); 1550 *new_entry = *entry; 1551 1552 new_entry->end = start; 1553 entry->offset += (start - entry->start); 1554 entry->start = start; 1555 1556 vm_map_entry_link(map, entry->prev, new_entry); 1557 1558 switch(entry->maptype) { 1559 case VM_MAPTYPE_NORMAL: 1560 case VM_MAPTYPE_VPAGETABLE: 1561 if (new_entry->object.vm_object) { 1562 vm_object_hold(new_entry->object.vm_object); 1563 vm_object_chain_wait(new_entry->object.vm_object, 0); 1564 vm_object_reference_locked(new_entry->object.vm_object); 1565 vm_object_drop(new_entry->object.vm_object); 1566 } 1567 break; 1568 default: 1569 break; 1570 } 1571 } 1572 1573 /* 1574 * Asserts that the given entry ends at or before the specified address. 1575 * If necessary, it splits the entry into two. 1576 * 1577 * The map must be exclusively locked. 1578 */ 1579 #define vm_map_clip_end(map, entry, endaddr, countp) \ 1580 { \ 1581 if (endaddr < entry->end) \ 1582 _vm_map_clip_end(map, entry, endaddr, countp); \ 1583 } 1584 1585 /* 1586 * This routine is called only when it is known that the entry must be split. 1587 * 1588 * The map must be exclusively locked. 1589 */ 1590 static void 1591 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, 1592 int *countp) 1593 { 1594 vm_map_entry_t new_entry; 1595 1596 /* 1597 * If there is no object backing this entry, we might as well create 1598 * one now. If we defer it, an object can get created after the map 1599 * is clipped, and individual objects will be created for the split-up 1600 * map. This is a bit of a hack, but is also about the best place to 1601 * put this improvement. 1602 */ 1603 1604 if (entry->object.vm_object == NULL && !map->system_map && 1605 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1606 vm_map_entry_allocate_object(entry); 1607 } 1608 1609 /* 1610 * Create a new entry and insert it AFTER the specified entry 1611 */ 1612 new_entry = vm_map_entry_create(map, countp); 1613 *new_entry = *entry; 1614 1615 new_entry->start = entry->end = end; 1616 new_entry->offset += (end - entry->start); 1617 1618 vm_map_entry_link(map, entry, new_entry); 1619 1620 switch(entry->maptype) { 1621 case VM_MAPTYPE_NORMAL: 1622 case VM_MAPTYPE_VPAGETABLE: 1623 if (new_entry->object.vm_object) { 1624 vm_object_hold(new_entry->object.vm_object); 1625 vm_object_chain_wait(new_entry->object.vm_object, 0); 1626 vm_object_reference_locked(new_entry->object.vm_object); 1627 vm_object_drop(new_entry->object.vm_object); 1628 } 1629 break; 1630 default: 1631 break; 1632 } 1633 } 1634 1635 /* 1636 * Asserts that the starting and ending region addresses fall within the 1637 * valid range for the map. 1638 */ 1639 #define VM_MAP_RANGE_CHECK(map, start, end) \ 1640 { \ 1641 if (start < vm_map_min(map)) \ 1642 start = vm_map_min(map); \ 1643 if (end > vm_map_max(map)) \ 1644 end = vm_map_max(map); \ 1645 if (start > end) \ 1646 start = end; \ 1647 } 1648 1649 /* 1650 * Used to block when an in-transition collison occurs. The map 1651 * is unlocked for the sleep and relocked before the return. 
1652 */ 1653 void 1654 vm_map_transition_wait(vm_map_t map) 1655 { 1656 tsleep_interlock(map, 0); 1657 vm_map_unlock(map); 1658 tsleep(map, PINTERLOCKED, "vment", 0); 1659 vm_map_lock(map); 1660 } 1661 1662 /* 1663 * When we do blocking operations with the map lock held it is 1664 * possible that a clip might have occured on our in-transit entry, 1665 * requiring an adjustment to the entry in our loop. These macros 1666 * help the pageable and clip_range code deal with the case. The 1667 * conditional costs virtually nothing if no clipping has occured. 1668 */ 1669 1670 #define CLIP_CHECK_BACK(entry, save_start) \ 1671 do { \ 1672 while (entry->start != save_start) { \ 1673 entry = entry->prev; \ 1674 KASSERT(entry != &map->header, ("bad entry clip")); \ 1675 } \ 1676 } while(0) 1677 1678 #define CLIP_CHECK_FWD(entry, save_end) \ 1679 do { \ 1680 while (entry->end != save_end) { \ 1681 entry = entry->next; \ 1682 KASSERT(entry != &map->header, ("bad entry clip")); \ 1683 } \ 1684 } while(0) 1685 1686 1687 /* 1688 * Clip the specified range and return the base entry. The 1689 * range may cover several entries starting at the returned base 1690 * and the first and last entry in the covering sequence will be 1691 * properly clipped to the requested start and end address. 1692 * 1693 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES 1694 * flag. 1695 * 1696 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries 1697 * covered by the requested range. 1698 * 1699 * The map must be exclusively locked on entry and will remain locked 1700 * on return. If no range exists or the range contains holes and you 1701 * specified that no holes were allowed, NULL will be returned. This 1702 * routine may temporarily unlock the map in order avoid a deadlock when 1703 * sleeping. 1704 */ 1705 static 1706 vm_map_entry_t 1707 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 1708 int *countp, int flags) 1709 { 1710 vm_map_entry_t start_entry; 1711 vm_map_entry_t entry; 1712 1713 /* 1714 * Locate the entry and effect initial clipping. The in-transition 1715 * case does not occur very often so do not try to optimize it. 1716 */ 1717 again: 1718 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) 1719 return (NULL); 1720 entry = start_entry; 1721 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1722 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1723 ++mycpu->gd_cnt.v_intrans_coll; 1724 ++mycpu->gd_cnt.v_intrans_wait; 1725 vm_map_transition_wait(map); 1726 /* 1727 * entry and/or start_entry may have been clipped while 1728 * we slept, or may have gone away entirely. We have 1729 * to restart from the lookup. 1730 */ 1731 goto again; 1732 } 1733 1734 /* 1735 * Since we hold an exclusive map lock we do not have to restart 1736 * after clipping, even though clipping may block in zalloc. 1737 */ 1738 vm_map_clip_start(map, entry, start, countp); 1739 vm_map_clip_end(map, entry, end, countp); 1740 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1741 1742 /* 1743 * Scan entries covered by the range. When working on the next 1744 * entry a restart need only re-loop on the current entry which 1745 * we have already locked, since 'next' may have changed. Also, 1746 * even though entry is safe, it may have been clipped so we 1747 * have to iterate forwards through the clip after sleeping. 
1748 */ 1749 while (entry->next != &map->header && entry->next->start < end) { 1750 vm_map_entry_t next = entry->next; 1751 1752 if (flags & MAP_CLIP_NO_HOLES) { 1753 if (next->start > entry->end) { 1754 vm_map_unclip_range(map, start_entry, 1755 start, entry->end, countp, flags); 1756 return(NULL); 1757 } 1758 } 1759 1760 if (next->eflags & MAP_ENTRY_IN_TRANSITION) { 1761 vm_offset_t save_end = entry->end; 1762 next->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1763 ++mycpu->gd_cnt.v_intrans_coll; 1764 ++mycpu->gd_cnt.v_intrans_wait; 1765 vm_map_transition_wait(map); 1766 1767 /* 1768 * clips might have occured while we blocked. 1769 */ 1770 CLIP_CHECK_FWD(entry, save_end); 1771 CLIP_CHECK_BACK(start_entry, start); 1772 continue; 1773 } 1774 /* 1775 * No restart necessary even though clip_end may block, we 1776 * are holding the map lock. 1777 */ 1778 vm_map_clip_end(map, next, end, countp); 1779 next->eflags |= MAP_ENTRY_IN_TRANSITION; 1780 entry = next; 1781 } 1782 if (flags & MAP_CLIP_NO_HOLES) { 1783 if (entry->end != end) { 1784 vm_map_unclip_range(map, start_entry, 1785 start, entry->end, countp, flags); 1786 return(NULL); 1787 } 1788 } 1789 return(start_entry); 1790 } 1791 1792 /* 1793 * Undo the effect of vm_map_clip_range(). You should pass the same 1794 * flags and the same range that you passed to vm_map_clip_range(). 1795 * This code will clear the in-transition flag on the entries and 1796 * wake up anyone waiting. This code will also simplify the sequence 1797 * and attempt to merge it with entries before and after the sequence. 1798 * 1799 * The map must be locked on entry and will remain locked on return. 1800 * 1801 * Note that you should also pass the start_entry returned by 1802 * vm_map_clip_range(). However, if you block between the two calls 1803 * with the map unlocked please be aware that the start_entry may 1804 * have been clipped and you may need to scan it backwards to find 1805 * the entry corresponding with the original start address. You are 1806 * responsible for this, vm_map_unclip_range() expects the correct 1807 * start_entry to be passed to it and will KASSERT otherwise. 1808 */ 1809 static 1810 void 1811 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry, 1812 vm_offset_t start, vm_offset_t end, 1813 int *countp, int flags) 1814 { 1815 vm_map_entry_t entry; 1816 1817 entry = start_entry; 1818 1819 KASSERT(entry->start == start, ("unclip_range: illegal base entry")); 1820 while (entry != &map->header && entry->start < end) { 1821 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1822 ("in-transition flag not set during unclip on: %p", 1823 entry)); 1824 KASSERT(entry->end <= end, 1825 ("unclip_range: tail wasn't clipped")); 1826 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1827 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1828 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1829 wakeup(map); 1830 } 1831 entry = entry->next; 1832 } 1833 1834 /* 1835 * Simplification does not block so there is no restart case. 1836 */ 1837 entry = start_entry; 1838 while (entry != &map->header && entry->start < end) { 1839 vm_map_simplify_entry(map, entry, countp); 1840 entry = entry->next; 1841 } 1842 } 1843 1844 /* 1845 * Mark the given range as handled by a subordinate map. 1846 * 1847 * This range must have been created with vm_map_find(), and no other 1848 * operations may have been performed on this range prior to calling 1849 * vm_map_submap(). 1850 * 1851 * Submappings cannot be removed. 1852 * 1853 * No requirements. 
1854 */ 1855 int 1856 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) 1857 { 1858 vm_map_entry_t entry; 1859 int result = KERN_INVALID_ARGUMENT; 1860 int count; 1861 1862 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1863 vm_map_lock(map); 1864 1865 VM_MAP_RANGE_CHECK(map, start, end); 1866 1867 if (vm_map_lookup_entry(map, start, &entry)) { 1868 vm_map_clip_start(map, entry, start, &count); 1869 } else { 1870 entry = entry->next; 1871 } 1872 1873 vm_map_clip_end(map, entry, end, &count); 1874 1875 if ((entry->start == start) && (entry->end == end) && 1876 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1877 (entry->object.vm_object == NULL)) { 1878 entry->object.sub_map = submap; 1879 entry->maptype = VM_MAPTYPE_SUBMAP; 1880 result = KERN_SUCCESS; 1881 } 1882 vm_map_unlock(map); 1883 vm_map_entry_release(count); 1884 1885 return (result); 1886 } 1887 1888 /* 1889 * Sets the protection of the specified address region in the target map. 1890 * If "set_max" is specified, the maximum protection is to be set; 1891 * otherwise, only the current protection is affected. 1892 * 1893 * The protection is not applicable to submaps, but is applicable to normal 1894 * maps and maps governed by virtual page tables. For example, when operating 1895 * on a virtual page table our protection basically controls how COW occurs 1896 * on the backing object, whereas the virtual page table abstraction itself 1897 * is an abstraction for userland. 1898 * 1899 * No requirements. 1900 */ 1901 int 1902 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1903 vm_prot_t new_prot, boolean_t set_max) 1904 { 1905 vm_map_entry_t current; 1906 vm_map_entry_t entry; 1907 int count; 1908 1909 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1910 vm_map_lock(map); 1911 1912 VM_MAP_RANGE_CHECK(map, start, end); 1913 1914 if (vm_map_lookup_entry(map, start, &entry)) { 1915 vm_map_clip_start(map, entry, start, &count); 1916 } else { 1917 entry = entry->next; 1918 } 1919 1920 /* 1921 * Make a first pass to check for protection violations. 1922 */ 1923 current = entry; 1924 while ((current != &map->header) && (current->start < end)) { 1925 if (current->maptype == VM_MAPTYPE_SUBMAP) { 1926 vm_map_unlock(map); 1927 vm_map_entry_release(count); 1928 return (KERN_INVALID_ARGUMENT); 1929 } 1930 if ((new_prot & current->max_protection) != new_prot) { 1931 vm_map_unlock(map); 1932 vm_map_entry_release(count); 1933 return (KERN_PROTECTION_FAILURE); 1934 } 1935 current = current->next; 1936 } 1937 1938 /* 1939 * Go back and fix up protections. [Note that clipping is not 1940 * necessary the second time.] 1941 */ 1942 current = entry; 1943 1944 while ((current != &map->header) && (current->start < end)) { 1945 vm_prot_t old_prot; 1946 1947 vm_map_clip_end(map, current, end, &count); 1948 1949 old_prot = current->protection; 1950 if (set_max) { 1951 current->max_protection = new_prot; 1952 current->protection = new_prot & old_prot; 1953 } else { 1954 current->protection = new_prot; 1955 } 1956 1957 /* 1958 * Update physical map if necessary. Worry about copy-on-write 1959 * here -- CHECK THIS XXX 1960 */ 1961 1962 if (current->protection != old_prot) { 1963 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 1964 VM_PROT_ALL) 1965 1966 pmap_protect(map->pmap, current->start, 1967 current->end, 1968 current->protection & MASK(current)); 1969 #undef MASK 1970 } 1971 1972 vm_map_simplify_entry(map, current, &count); 1973 1974 current = current->next; 1975 } 1976 1977 vm_map_unlock(map); 1978 vm_map_entry_release(count); 1979 return (KERN_SUCCESS); 1980 } 1981 1982 /* 1983 * This routine traverses a process's map handling the madvise 1984 * system call. Advisories are classified as either those affecting 1985 * the vm_map_entry structure, or those affecting the underlying 1986 * objects. 1987 * 1988 * The <value> argument is used for extended madvise calls. 1989 * 1990 * No requirements. 1991 */ 1992 int 1993 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, 1994 int behav, off_t value) 1995 { 1996 vm_map_entry_t current, entry; 1997 int modify_map = 0; 1998 int error = 0; 1999 int count; 2000 2001 /* 2002 * Some madvise calls directly modify the vm_map_entry, in which case 2003 * we need to use an exclusive lock on the map and we need to perform 2004 * various clipping operations. Otherwise we only need a read-lock 2005 * on the map. 2006 */ 2007 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2008 2009 switch(behav) { 2010 case MADV_NORMAL: 2011 case MADV_SEQUENTIAL: 2012 case MADV_RANDOM: 2013 case MADV_NOSYNC: 2014 case MADV_AUTOSYNC: 2015 case MADV_NOCORE: 2016 case MADV_CORE: 2017 case MADV_SETMAP: 2018 modify_map = 1; 2019 vm_map_lock(map); 2020 break; 2021 case MADV_INVAL: 2022 case MADV_WILLNEED: 2023 case MADV_DONTNEED: 2024 case MADV_FREE: 2025 vm_map_lock_read(map); 2026 break; 2027 default: 2028 vm_map_entry_release(count); 2029 return (EINVAL); 2030 } 2031 2032 /* 2033 * Locate starting entry and clip if necessary. 2034 */ 2035 2036 VM_MAP_RANGE_CHECK(map, start, end); 2037 2038 if (vm_map_lookup_entry(map, start, &entry)) { 2039 if (modify_map) 2040 vm_map_clip_start(map, entry, start, &count); 2041 } else { 2042 entry = entry->next; 2043 } 2044 2045 if (modify_map) { 2046 /* 2047 * madvise behaviors that are implemented in the vm_map_entry. 2048 * 2049 * We clip the vm_map_entry so that behavioral changes are 2050 * limited to the specified address range. 2051 */ 2052 for (current = entry; 2053 (current != &map->header) && (current->start < end); 2054 current = current->next 2055 ) { 2056 if (current->maptype == VM_MAPTYPE_SUBMAP) 2057 continue; 2058 2059 vm_map_clip_end(map, current, end, &count); 2060 2061 switch (behav) { 2062 case MADV_NORMAL: 2063 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2064 break; 2065 case MADV_SEQUENTIAL: 2066 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2067 break; 2068 case MADV_RANDOM: 2069 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2070 break; 2071 case MADV_NOSYNC: 2072 current->eflags |= MAP_ENTRY_NOSYNC; 2073 break; 2074 case MADV_AUTOSYNC: 2075 current->eflags &= ~MAP_ENTRY_NOSYNC; 2076 break; 2077 case MADV_NOCORE: 2078 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2079 break; 2080 case MADV_CORE: 2081 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2082 break; 2083 case MADV_SETMAP: 2084 /* 2085 * Set the page directory page for a map 2086 * governed by a virtual page table. The 2087 * entry must already be governed by a 2088 * virtual page table or EINVAL is returned. 2089 * 2090 * XXX the page directory page is stored 2091 * in the avail_ssize field of the map_entry.
2092 * 2093 * XXX the map simplification code does not 2094 * compare this field so weird things may 2095 * happen if you do not apply this function 2096 * to the entire mapping governed by the 2097 * virtual page table. 2098 */ 2099 if (current->maptype != VM_MAPTYPE_VPAGETABLE) { 2100 error = EINVAL; 2101 break; 2102 } 2103 current->aux.master_pde = value; 2104 pmap_remove(map->pmap, 2105 current->start, current->end); 2106 break; 2107 case MADV_INVAL: 2108 /* 2109 * Invalidate the related pmap entries, used 2110 * to flush portions of the real kernel's 2111 * pmap when the caller has removed or 2112 * modified existing mappings in a virtual 2113 * page table. 2114 * 2115 * (exclusive locked map version does not 2116 * need the range interlock). 2117 */ 2118 pmap_remove(map->pmap, 2119 current->start, current->end); 2120 break; 2121 default: 2122 error = EINVAL; 2123 break; 2124 } 2125 vm_map_simplify_entry(map, current, &count); 2126 } 2127 vm_map_unlock(map); 2128 } else { 2129 vm_pindex_t pindex; 2130 vm_pindex_t delta; 2131 2132 /* 2133 * madvise behaviors that are implemented in the underlying 2134 * vm_object. 2135 * 2136 * Since we don't clip the vm_map_entry, we have to clip 2137 * the vm_object pindex and count. 2138 * 2139 * NOTE! These functions are only supported on normal maps, 2140 * except MADV_INVAL which is also supported on 2141 * virtual page tables. 2142 */ 2143 for (current = entry; 2144 (current != &map->header) && (current->start < end); 2145 current = current->next 2146 ) { 2147 vm_offset_t useStart; 2148 2149 if (current->maptype != VM_MAPTYPE_NORMAL && 2150 (current->maptype != VM_MAPTYPE_VPAGETABLE || 2151 behav != MADV_INVAL)) { 2152 continue; 2153 } 2154 2155 pindex = OFF_TO_IDX(current->offset); 2156 delta = atop(current->end - current->start); 2157 useStart = current->start; 2158 2159 if (current->start < start) { 2160 pindex += atop(start - current->start); 2161 delta -= atop(start - current->start); 2162 useStart = start; 2163 } 2164 if (current->end > end) 2165 delta -= atop(current->end - end); 2166 2167 if ((vm_spindex_t)delta <= 0) 2168 continue; 2169 2170 if (behav == MADV_INVAL) { 2171 /* 2172 * Invalidate the related pmap entries, used 2173 * to flush portions of the real kernel's 2174 * pmap when the caller has removed or 2175 * modified existing mappings in a virtual 2176 * page table. 2177 * 2178 * (shared locked map version needs the 2179 * interlock, see vm_fault()). 2180 */ 2181 struct vm_map_ilock ilock; 2182 2183 KASSERT(useStart >= VM_MIN_USER_ADDRESS && 2184 useStart + ptoa(delta) <= 2185 VM_MAX_USER_ADDRESS, 2186 ("Bad range %016jx-%016jx (%016jx)", 2187 useStart, useStart + ptoa(delta), 2188 delta)); 2189 vm_map_interlock(map, &ilock, 2190 useStart, 2191 useStart + ptoa(delta)); 2192 pmap_remove(map->pmap, 2193 useStart, 2194 useStart + ptoa(delta)); 2195 vm_map_deinterlock(map, &ilock); 2196 } else { 2197 vm_object_madvise(current->object.vm_object, 2198 pindex, delta, behav); 2199 } 2200 2201 /* 2202 * Try to populate the page table. Mappings governed 2203 * by virtual page tables cannot be pre-populated 2204 * without a lot of work so don't try. 
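 * (The prefault length handed to pmap_object_init_pt() below is the
 * clipped page count, delta, converted to bytes.)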
2205 */ 2206 if (behav == MADV_WILLNEED && 2207 current->maptype != VM_MAPTYPE_VPAGETABLE) { 2208 pmap_object_init_pt( 2209 map->pmap, 2210 useStart, 2211 current->protection, 2212 current->object.vm_object, 2213 pindex, 2214 (delta << PAGE_SHIFT), 2215 MAP_PREFAULT_MADVISE 2216 ); 2217 } 2218 } 2219 vm_map_unlock_read(map); 2220 } 2221 vm_map_entry_release(count); 2222 return(error); 2223 } 2224 2225 2226 /* 2227 * Sets the inheritance of the specified address range in the target map. 2228 * Inheritance affects how the map will be shared with child maps at the 2229 * time of vm_map_fork. 2230 */ 2231 int 2232 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2233 vm_inherit_t new_inheritance) 2234 { 2235 vm_map_entry_t entry; 2236 vm_map_entry_t temp_entry; 2237 int count; 2238 2239 switch (new_inheritance) { 2240 case VM_INHERIT_NONE: 2241 case VM_INHERIT_COPY: 2242 case VM_INHERIT_SHARE: 2243 break; 2244 default: 2245 return (KERN_INVALID_ARGUMENT); 2246 } 2247 2248 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2249 vm_map_lock(map); 2250 2251 VM_MAP_RANGE_CHECK(map, start, end); 2252 2253 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2254 entry = temp_entry; 2255 vm_map_clip_start(map, entry, start, &count); 2256 } else 2257 entry = temp_entry->next; 2258 2259 while ((entry != &map->header) && (entry->start < end)) { 2260 vm_map_clip_end(map, entry, end, &count); 2261 2262 entry->inheritance = new_inheritance; 2263 2264 vm_map_simplify_entry(map, entry, &count); 2265 2266 entry = entry->next; 2267 } 2268 vm_map_unlock(map); 2269 vm_map_entry_release(count); 2270 return (KERN_SUCCESS); 2271 } 2272 2273 /* 2274 * Implement the semantics of mlock 2275 */ 2276 int 2277 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, 2278 boolean_t new_pageable) 2279 { 2280 vm_map_entry_t entry; 2281 vm_map_entry_t start_entry; 2282 vm_offset_t end; 2283 int rv = KERN_SUCCESS; 2284 int count; 2285 2286 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2287 vm_map_lock(map); 2288 VM_MAP_RANGE_CHECK(map, start, real_end); 2289 end = real_end; 2290 2291 start_entry = vm_map_clip_range(map, start, end, &count, 2292 MAP_CLIP_NO_HOLES); 2293 if (start_entry == NULL) { 2294 vm_map_unlock(map); 2295 vm_map_entry_release(count); 2296 return (KERN_INVALID_ADDRESS); 2297 } 2298 2299 if (new_pageable == 0) { 2300 entry = start_entry; 2301 while ((entry != &map->header) && (entry->start < end)) { 2302 vm_offset_t save_start; 2303 vm_offset_t save_end; 2304 2305 /* 2306 * Already user wired or hard wired (trivial cases) 2307 */ 2308 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 2309 entry = entry->next; 2310 continue; 2311 } 2312 if (entry->wired_count != 0) { 2313 entry->wired_count++; 2314 entry->eflags |= MAP_ENTRY_USER_WIRED; 2315 entry = entry->next; 2316 continue; 2317 } 2318 2319 /* 2320 * A new wiring requires instantiation of appropriate 2321 * management structures and the faulting in of the 2322 * page. 2323 */ 2324 if (entry->maptype == VM_MAPTYPE_NORMAL || 2325 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2326 int copyflag = entry->eflags & 2327 MAP_ENTRY_NEEDS_COPY; 2328 if (copyflag && ((entry->protection & 2329 VM_PROT_WRITE) != 0)) { 2330 vm_map_entry_shadow(entry, 0); 2331 } else if (entry->object.vm_object == NULL && 2332 !map->system_map) { 2333 vm_map_entry_allocate_object(entry); 2334 } 2335 } 2336 entry->wired_count++; 2337 entry->eflags |= MAP_ENTRY_USER_WIRED; 2338 2339 /* 2340 * Now fault in the area.
Note that vm_fault_wire() 2341 * may release the map lock temporarily; it will be 2342 * relocked on return. The in-transition 2343 * flag protects the entries. 2344 */ 2345 save_start = entry->start; 2346 save_end = entry->end; 2347 rv = vm_fault_wire(map, entry, TRUE, 0); 2348 if (rv) { 2349 CLIP_CHECK_BACK(entry, save_start); 2350 for (;;) { 2351 KASSERT(entry->wired_count == 1, ("bad wired_count on entry")); 2352 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2353 entry->wired_count = 0; 2354 if (entry->end == save_end) 2355 break; 2356 entry = entry->next; 2357 KASSERT(entry != &map->header, ("bad entry clip during backout")); 2358 } 2359 end = save_start; /* unwire the rest */ 2360 break; 2361 } 2362 /* 2363 * note that even though the entry might have been 2364 * clipped, the USER_WIRED flag we set prevents 2365 * duplication so we do not have to do a 2366 * clip check. 2367 */ 2368 entry = entry->next; 2369 } 2370 2371 /* 2372 * If we failed, fall through to the unwiring section to 2373 * unwire what we had wired so far. 'end' has already 2374 * been adjusted. 2375 */ 2376 if (rv) 2377 new_pageable = 1; 2378 2379 /* 2380 * start_entry might have been clipped if we unlocked the 2381 * map and blocked. No matter how clipped it has gotten 2382 * there should be a fragment that is on our start boundary. 2383 */ 2384 CLIP_CHECK_BACK(start_entry, start); 2385 } 2386 2387 /* 2388 * Deal with the unwiring case. 2389 */ 2390 if (new_pageable) { 2391 /* 2392 * This is the unwiring case. We must first ensure that the 2393 * range to be unwired is really wired down. We know there 2394 * are no holes. 2395 */ 2396 entry = start_entry; 2397 while ((entry != &map->header) && (entry->start < end)) { 2398 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2399 rv = KERN_INVALID_ARGUMENT; 2400 goto done; 2401 } 2402 KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry)); 2403 entry = entry->next; 2404 } 2405 2406 /* 2407 * Now decrement the wiring count for each region. If a region 2408 * becomes completely unwired, unwire its physical pages and 2409 * mappings. 2410 */ 2411 /* 2412 * NOTE: 'entry' must be reset to start_entry before this second 2413 * loop. An earlier version of this code reused the "entry" 2414 * loop variable left over from the first loop without 2415 * resetting it, so the second loop was never entered and 2416 * the pages backing the entries were never unwired, 2417 * leaking wired pages. The loop below therefore restarts 2418 * from start_entry and asserts that each entry is still 2419 * user-wired before unwiring it. 2420 */ 2421 entry = start_entry; 2422 while ((entry != &map->header) && (entry->start < end)) { 2423 KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, 2424 ("expected USER_WIRED on entry %p", entry)); 2425 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2426 entry->wired_count--; 2427 if (entry->wired_count == 0) 2428 vm_fault_unwire(map, entry); 2429 entry = entry->next; 2430 } 2431 } 2432 done: 2433 vm_map_unclip_range(map, start_entry, start, real_end, &count, 2434 MAP_CLIP_NO_HOLES); 2435 map->timestamp++; 2436 vm_map_unlock(map); 2437 vm_map_entry_release(count); 2438 return (rv); 2439 } 2440 2441 /* 2442 * Sets the pageability of the specified address range in the target map. 2443 * Regions specified as not pageable require locked-down physical 2444 * memory and physical page maps. 2445 * 2446 * The map must not be locked, but a reference must remain to the map 2447 * throughout the call.
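 *
 * Illustrative call sketch (hedged; "kernel_map", "addr" and "size" are
 * assumed names).  kmflags may include KM_KRESERVE when called from the
 * reserved-entry (zalloc) path, or KM_PAGEABLE to unwire instead:
 *
 *	rv = vm_map_wire(kernel_map, addr, addr + size, kmflags);
 *	if (rv != KERN_SUCCESS)
 *		...back the mapping out...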
2448 * 2449 * This function may be called via the zalloc path and must properly 2450 * reserve map entries for kernel_map. 2451 * 2452 * No requirements. 2453 */ 2454 int 2455 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags) 2456 { 2457 vm_map_entry_t entry; 2458 vm_map_entry_t start_entry; 2459 vm_offset_t end; 2460 int rv = KERN_SUCCESS; 2461 int count; 2462 2463 if (kmflags & KM_KRESERVE) 2464 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 2465 else 2466 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2467 vm_map_lock(map); 2468 VM_MAP_RANGE_CHECK(map, start, real_end); 2469 end = real_end; 2470 2471 start_entry = vm_map_clip_range(map, start, end, &count, 2472 MAP_CLIP_NO_HOLES); 2473 if (start_entry == NULL) { 2474 vm_map_unlock(map); 2475 rv = KERN_INVALID_ADDRESS; 2476 goto failure; 2477 } 2478 if ((kmflags & KM_PAGEABLE) == 0) { 2479 /* 2480 * Wiring. 2481 * 2482 * 1. Holding the write lock, we create any shadow or zero-fill 2483 * objects that need to be created. Then we clip each map 2484 * entry to the region to be wired and increment its wiring 2485 * count. We create objects before clipping the map entries 2486 * to avoid object proliferation. 2487 * 2488 * 2. We downgrade to a read lock, and call vm_fault_wire to 2489 * fault in the pages for any newly wired area (wired_count is 2490 * 1). 2491 * 2492 * Downgrading to a read lock for vm_fault_wire avoids a 2493 * possible deadlock with another process that may have faulted 2494 * on one of the pages to be wired (it would mark the page busy, 2495 * blocking us, then in turn block on the map lock that we 2496 * hold). Because of problems in the recursive lock package, 2497 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 2498 * any actions that require the write lock must be done 2499 * beforehand. Because we keep the read lock on the map, the 2500 * copy-on-write status of the entries we modify here cannot 2501 * change. 2502 */ 2503 entry = start_entry; 2504 while ((entry != &map->header) && (entry->start < end)) { 2505 /* 2506 * Trivial case if the entry is already wired 2507 */ 2508 if (entry->wired_count) { 2509 entry->wired_count++; 2510 entry = entry->next; 2511 continue; 2512 } 2513 2514 /* 2515 * The entry is being newly wired; we have to set up 2516 * appropriate management structures. A shadow 2517 * object is required for a copy-on-write region, 2518 * or a normal object for a zero-fill region. We 2519 * do not have to do this for entries that point to sub 2520 * maps because we won't hold the lock on the sub map. 2521 */ 2522 if (entry->maptype == VM_MAPTYPE_NORMAL || 2523 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2524 int copyflag = entry->eflags & 2525 MAP_ENTRY_NEEDS_COPY; 2526 if (copyflag && ((entry->protection & 2527 VM_PROT_WRITE) != 0)) { 2528 vm_map_entry_shadow(entry, 0); 2529 } else if (entry->object.vm_object == NULL && 2530 !map->system_map) { 2531 vm_map_entry_allocate_object(entry); 2532 } 2533 } 2534 2535 entry->wired_count++; 2536 entry = entry->next; 2537 } 2538 2539 /* 2540 * Pass 2. 2541 */ 2542 2543 /* 2544 * HACK HACK HACK HACK 2545 * 2546 * vm_fault_wire() temporarily unlocks the map to avoid 2547 * deadlocks. The in-transition flag from the vm_map_clip_range() 2548 * call should protect us from changes while the map is 2549 * unlocked. 2550 * 2551 * NOTE: Previously this comment stated that clipping might 2552 * still occur while the map is unlocked, but from 2553 * what I can tell it actually cannot.
2554 * 2555 * It is unclear whether the CLIP_CHECK_*() calls 2556 * are still needed but we keep them in anyway. 2557 * 2558 * HACK HACK HACK HACK 2559 */ 2560 2561 entry = start_entry; 2562 while (entry != &map->header && entry->start < end) { 2563 /* 2564 * If vm_fault_wire fails for any page we need to undo 2565 * what has been done. We decrement the wiring count 2566 * for those pages which have not yet been wired (now) 2567 * and unwire those that have (later). 2568 */ 2569 vm_offset_t save_start = entry->start; 2570 vm_offset_t save_end = entry->end; 2571 2572 if (entry->wired_count == 1) 2573 rv = vm_fault_wire(map, entry, FALSE, kmflags); 2574 if (rv) { 2575 CLIP_CHECK_BACK(entry, save_start); 2576 for (;;) { 2577 KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly")); 2578 entry->wired_count = 0; 2579 if (entry->end == save_end) 2580 break; 2581 entry = entry->next; 2582 KASSERT(entry != &map->header, ("bad entry clip during backout")); 2583 } 2584 end = save_start; 2585 break; 2586 } 2587 CLIP_CHECK_FWD(entry, save_end); 2588 entry = entry->next; 2589 } 2590 2591 /* 2592 * If a failure occured undo everything by falling through 2593 * to the unwiring code. 'end' has already been adjusted 2594 * appropriately. 2595 */ 2596 if (rv) 2597 kmflags |= KM_PAGEABLE; 2598 2599 /* 2600 * start_entry is still IN_TRANSITION but may have been 2601 * clipped since vm_fault_wire() unlocks and relocks the 2602 * map. No matter how clipped it has gotten there should 2603 * be a fragment that is on our start boundary. 2604 */ 2605 CLIP_CHECK_BACK(start_entry, start); 2606 } 2607 2608 if (kmflags & KM_PAGEABLE) { 2609 /* 2610 * This is the unwiring case. We must first ensure that the 2611 * range to be unwired is really wired down. We know there 2612 * are no holes. 2613 */ 2614 entry = start_entry; 2615 while ((entry != &map->header) && (entry->start < end)) { 2616 if (entry->wired_count == 0) { 2617 rv = KERN_INVALID_ARGUMENT; 2618 goto done; 2619 } 2620 entry = entry->next; 2621 } 2622 2623 /* 2624 * Now decrement the wiring count for each region. If a region 2625 * becomes completely unwired, unwire its physical pages and 2626 * mappings. 2627 */ 2628 entry = start_entry; 2629 while ((entry != &map->header) && (entry->start < end)) { 2630 entry->wired_count--; 2631 if (entry->wired_count == 0) 2632 vm_fault_unwire(map, entry); 2633 entry = entry->next; 2634 } 2635 } 2636 done: 2637 vm_map_unclip_range(map, start_entry, start, real_end, 2638 &count, MAP_CLIP_NO_HOLES); 2639 map->timestamp++; 2640 vm_map_unlock(map); 2641 failure: 2642 if (kmflags & KM_KRESERVE) 2643 vm_map_entry_krelease(count); 2644 else 2645 vm_map_entry_release(count); 2646 return (rv); 2647 } 2648 2649 /* 2650 * Mark a newly allocated address range as wired but do not fault in 2651 * the pages. The caller is expected to load the pages into the object. 2652 * 2653 * The map must be locked on entry and will remain locked on return. 2654 * No other requirements. 
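 *
 * Illustrative call sketch (hedged example; "map", "addr", "size" and
 * "count" are assumed names), following the reserve/lock conventions
 * used elsewhere in this file:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	vm_map_set_wired_quick(map, addr, size, &count);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);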
2655 */ 2656 void 2657 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, 2658 int *countp) 2659 { 2660 vm_map_entry_t scan; 2661 vm_map_entry_t entry; 2662 2663 entry = vm_map_clip_range(map, addr, addr + size, 2664 countp, MAP_CLIP_NO_HOLES); 2665 for (scan = entry; 2666 scan != &map->header && scan->start < addr + size; 2667 scan = scan->next) { 2668 KKASSERT(scan->wired_count == 0); 2669 scan->wired_count = 1; 2670 } 2671 vm_map_unclip_range(map, entry, addr, addr + size, 2672 countp, MAP_CLIP_NO_HOLES); 2673 } 2674 2675 /* 2676 * Push any dirty cached pages in the address range to their pager. 2677 * If syncio is TRUE, dirty pages are written synchronously. 2678 * If invalidate is TRUE, any cached pages are freed as well. 2679 * 2680 * This routine is called by sys_msync() 2681 * 2682 * Returns an error if any part of the specified range is not mapped. 2683 * 2684 * No requirements. 2685 */ 2686 int 2687 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, 2688 boolean_t syncio, boolean_t invalidate) 2689 { 2690 vm_map_entry_t current; 2691 vm_map_entry_t entry; 2692 vm_size_t size; 2693 vm_object_t object; 2694 vm_object_t tobj; 2695 vm_ooffset_t offset; 2696 2697 vm_map_lock_read(map); 2698 VM_MAP_RANGE_CHECK(map, start, end); 2699 if (!vm_map_lookup_entry(map, start, &entry)) { 2700 vm_map_unlock_read(map); 2701 return (KERN_INVALID_ADDRESS); 2702 } 2703 lwkt_gettoken(&map->token); 2704 2705 /* 2706 * Make a first pass to check for holes. 2707 */ 2708 for (current = entry; current->start < end; current = current->next) { 2709 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2710 lwkt_reltoken(&map->token); 2711 vm_map_unlock_read(map); 2712 return (KERN_INVALID_ARGUMENT); 2713 } 2714 if (end > current->end && 2715 (current->next == &map->header || 2716 current->end != current->next->start)) { 2717 lwkt_reltoken(&map->token); 2718 vm_map_unlock_read(map); 2719 return (KERN_INVALID_ADDRESS); 2720 } 2721 } 2722 2723 if (invalidate) 2724 pmap_remove(vm_map_pmap(map), start, end); 2725 2726 /* 2727 * Make a second pass, cleaning/uncaching pages from the indicated 2728 * objects as we go. 2729 */ 2730 for (current = entry; current->start < end; current = current->next) { 2731 offset = current->offset + (start - current->start); 2732 size = (end <= current->end ? end : current->end) - start; 2733 2734 switch(current->maptype) { 2735 case VM_MAPTYPE_SUBMAP: 2736 { 2737 vm_map_t smap; 2738 vm_map_entry_t tentry; 2739 vm_size_t tsize; 2740 2741 smap = current->object.sub_map; 2742 vm_map_lock_read(smap); 2743 vm_map_lookup_entry(smap, offset, &tentry); 2744 tsize = tentry->end - offset; 2745 if (tsize < size) 2746 size = tsize; 2747 object = tentry->object.vm_object; 2748 offset = tentry->offset + (offset - tentry->start); 2749 vm_map_unlock_read(smap); 2750 break; 2751 } 2752 case VM_MAPTYPE_NORMAL: 2753 case VM_MAPTYPE_VPAGETABLE: 2754 object = current->object.vm_object; 2755 break; 2756 default: 2757 object = NULL; 2758 break; 2759 } 2760 2761 if (object) 2762 vm_object_hold(object); 2763 2764 /* 2765 * Note that there is absolutely no sense in writing out 2766 * anonymous objects, so we track down the vnode object 2767 * to write out. 2768 * We invalidate (remove) all pages from the address space 2769 * anyway, for semantic correctness. 2770 * 2771 * note: certain anonymous maps, such as MAP_NOSYNC maps, 2772 * may start out with a NULL object. 
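 * (When the loop below descends to a backing object, it adds
 * backing_object_offset to the starting offset and clamps the size to
 * the backing object so the clean/remove operations stay within its
 * bounds.)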
2773 */ 2774 while (object && (tobj = object->backing_object) != NULL) { 2775 vm_object_hold(tobj); 2776 if (tobj == object->backing_object) { 2777 vm_object_lock_swap(); 2778 offset += object->backing_object_offset; 2779 vm_object_drop(object); 2780 object = tobj; 2781 if (object->size < OFF_TO_IDX(offset + size)) 2782 size = IDX_TO_OFF(object->size) - 2783 offset; 2784 break; 2785 } 2786 vm_object_drop(tobj); 2787 } 2788 if (object && (object->type == OBJT_VNODE) && 2789 (current->protection & VM_PROT_WRITE) && 2790 (object->flags & OBJ_NOMSYNC) == 0) { 2791 /* 2792 * Flush pages if writing is allowed, invalidate them 2793 * if invalidation requested. Pages undergoing I/O 2794 * will be ignored by vm_object_page_remove(). 2795 * 2796 * We cannot lock the vnode and then wait for paging 2797 * to complete without deadlocking against vm_fault. 2798 * Instead we simply call vm_object_page_remove() and 2799 * allow it to block internally on a page-by-page 2800 * basis when it encounters pages undergoing async 2801 * I/O. 2802 */ 2803 int flags; 2804 2805 /* no chain wait needed for vnode objects */ 2806 vm_object_reference_locked(object); 2807 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY); 2808 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 2809 flags |= invalidate ? OBJPC_INVAL : 0; 2810 2811 /* 2812 * When operating on a virtual page table just 2813 * flush the whole object. XXX we probably ought 2814 * to 2815 */ 2816 switch(current->maptype) { 2817 case VM_MAPTYPE_NORMAL: 2818 vm_object_page_clean(object, 2819 OFF_TO_IDX(offset), 2820 OFF_TO_IDX(offset + size + PAGE_MASK), 2821 flags); 2822 break; 2823 case VM_MAPTYPE_VPAGETABLE: 2824 vm_object_page_clean(object, 0, 0, flags); 2825 break; 2826 } 2827 vn_unlock(((struct vnode *)object->handle)); 2828 vm_object_deallocate_locked(object); 2829 } 2830 if (object && invalidate && 2831 ((object->type == OBJT_VNODE) || 2832 (object->type == OBJT_DEVICE) || 2833 (object->type == OBJT_MGTDEVICE))) { 2834 int clean_only = 2835 ((object->type == OBJT_DEVICE) || 2836 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE; 2837 /* no chain wait needed for vnode/device objects */ 2838 vm_object_reference_locked(object); 2839 switch(current->maptype) { 2840 case VM_MAPTYPE_NORMAL: 2841 vm_object_page_remove(object, 2842 OFF_TO_IDX(offset), 2843 OFF_TO_IDX(offset + size + PAGE_MASK), 2844 clean_only); 2845 break; 2846 case VM_MAPTYPE_VPAGETABLE: 2847 vm_object_page_remove(object, 0, 0, clean_only); 2848 break; 2849 } 2850 vm_object_deallocate_locked(object); 2851 } 2852 start += size; 2853 if (object) 2854 vm_object_drop(object); 2855 } 2856 2857 lwkt_reltoken(&map->token); 2858 vm_map_unlock_read(map); 2859 2860 return (KERN_SUCCESS); 2861 } 2862 2863 /* 2864 * Make the region specified by this entry pageable. 2865 * 2866 * The vm_map must be exclusively locked. 2867 */ 2868 static void 2869 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2870 { 2871 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2872 entry->wired_count = 0; 2873 vm_fault_unwire(map, entry); 2874 } 2875 2876 /* 2877 * Deallocate the given entry from the target map. 2878 * 2879 * The vm_map must be exclusively locked. 
2880 */ 2881 static void 2882 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp) 2883 { 2884 vm_map_entry_unlink(map, entry); 2885 map->size -= entry->end - entry->start; 2886 2887 switch(entry->maptype) { 2888 case VM_MAPTYPE_NORMAL: 2889 case VM_MAPTYPE_VPAGETABLE: 2890 case VM_MAPTYPE_SUBMAP: 2891 vm_object_deallocate(entry->object.vm_object); 2892 break; 2893 case VM_MAPTYPE_UKSMAP: 2894 /* XXX TODO */ 2895 break; 2896 default: 2897 break; 2898 } 2899 2900 vm_map_entry_dispose(map, entry, countp); 2901 } 2902 2903 /* 2904 * Deallocates the given address range from the target map. 2905 * 2906 * The vm_map must be exclusively locked. 2907 */ 2908 int 2909 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp) 2910 { 2911 vm_object_t object; 2912 vm_map_entry_t entry; 2913 vm_map_entry_t first_entry; 2914 vm_offset_t hole_start; 2915 2916 ASSERT_VM_MAP_LOCKED(map); 2917 lwkt_gettoken(&map->token); 2918 again: 2919 /* 2920 * Find the start of the region, and clip it. Set entry to point 2921 * at the first record containing the requested address or, if no 2922 * such record exists, the next record with a greater address. The 2923 * loop will run from this point until a record beyond the termination 2924 * address is encountered. 2925 * 2926 * Adjust freehint[] for either the clip case or the extension case. 2927 * 2928 * GGG see other GGG comment. 2929 */ 2930 if (vm_map_lookup_entry(map, start, &first_entry)) { 2931 entry = first_entry; 2932 vm_map_clip_start(map, entry, start, countp); 2933 hole_start = start; 2934 } else { 2935 entry = first_entry->next; 2936 if (entry == &map->header) 2937 hole_start = first_entry->start; 2938 else 2939 hole_start = first_entry->end; 2940 } 2941 2942 /* 2943 * Step through all entries in this region 2944 */ 2945 while ((entry != &map->header) && (entry->start < end)) { 2946 vm_map_entry_t next; 2947 vm_offset_t s, e; 2948 vm_pindex_t offidxstart, offidxend, count; 2949 2950 /* 2951 * If we hit an in-transition entry we have to sleep and 2952 * retry. It's easier (and not really slower) to just retry 2953 * since this case occurs so rarely and the hint is already 2954 * pointing at the right place. We have to reset the 2955 * start offset so as not to accidently delete an entry 2956 * another process just created in vacated space. 2957 */ 2958 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2959 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2960 start = entry->start; 2961 ++mycpu->gd_cnt.v_intrans_coll; 2962 ++mycpu->gd_cnt.v_intrans_wait; 2963 vm_map_transition_wait(map); 2964 goto again; 2965 } 2966 vm_map_clip_end(map, entry, end, countp); 2967 2968 s = entry->start; 2969 e = entry->end; 2970 next = entry->next; 2971 2972 offidxstart = OFF_TO_IDX(entry->offset); 2973 count = OFF_TO_IDX(e - s); 2974 2975 switch(entry->maptype) { 2976 case VM_MAPTYPE_NORMAL: 2977 case VM_MAPTYPE_VPAGETABLE: 2978 case VM_MAPTYPE_SUBMAP: 2979 object = entry->object.vm_object; 2980 break; 2981 default: 2982 object = NULL; 2983 break; 2984 } 2985 2986 /* 2987 * Unwire before removing addresses from the pmap; otherwise, 2988 * unwiring will put the entries back in the pmap. 2989 * 2990 * Generally speaking, doing a bulk pmap_remove() before 2991 * removing the pages from the VM object is better at 2992 * reducing unnecessary IPIs. The pmap code is now optimized 2993 * to not blindly iterate the range when pt and pd pages 2994 * are missing. 
2995 */ 2996 if (entry->wired_count != 0) 2997 vm_map_entry_unwire(map, entry); 2998 2999 offidxend = offidxstart + count; 3000 3001 if (object == &kernel_object) { 3002 pmap_remove(map->pmap, s, e); 3003 vm_object_hold(object); 3004 vm_object_page_remove(object, offidxstart, 3005 offidxend, FALSE); 3006 vm_object_drop(object); 3007 } else if (object && object->type != OBJT_DEFAULT && 3008 object->type != OBJT_SWAP) { 3009 /* 3010 * vnode object routines cannot be chain-locked, 3011 * but since we aren't removing pages from the 3012 * object here we can use a shared hold. 3013 */ 3014 vm_object_hold_shared(object); 3015 pmap_remove(map->pmap, s, e); 3016 vm_object_drop(object); 3017 } else if (object) { 3018 vm_object_hold(object); 3019 vm_object_chain_acquire(object, 0); 3020 pmap_remove(map->pmap, s, e); 3021 3022 if (object != NULL && 3023 object->ref_count != 1 && 3024 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == 3025 OBJ_ONEMAPPING && 3026 (object->type == OBJT_DEFAULT || 3027 object->type == OBJT_SWAP)) { 3028 vm_object_collapse(object, NULL); 3029 vm_object_page_remove(object, offidxstart, 3030 offidxend, FALSE); 3031 if (object->type == OBJT_SWAP) { 3032 swap_pager_freespace(object, 3033 offidxstart, 3034 count); 3035 } 3036 if (offidxend >= object->size && 3037 offidxstart < object->size) { 3038 object->size = offidxstart; 3039 } 3040 } 3041 vm_object_chain_release(object); 3042 vm_object_drop(object); 3043 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) { 3044 pmap_remove(map->pmap, s, e); 3045 } 3046 3047 /* 3048 * Delete the entry (which may delete the object) only after 3049 * removing all pmap entries pointing to its pages. 3050 * (Otherwise, its page frames may be reallocated, and any 3051 * modify bits will be set in the wrong object!) 3052 */ 3053 vm_map_entry_delete(map, entry, countp); 3054 entry = next; 3055 } 3056 if (entry == &map->header) 3057 vm_map_freehint_hole(map, hole_start, entry->end - hole_start); 3058 else 3059 vm_map_freehint_hole(map, hole_start, 3060 entry->start - hole_start); 3061 3062 lwkt_reltoken(&map->token); 3063 3064 return (KERN_SUCCESS); 3065 } 3066 3067 /* 3068 * Remove the given address range from the target map. 3069 * This is the exported form of vm_map_delete. 3070 * 3071 * No requirements. 3072 */ 3073 int 3074 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3075 { 3076 int result; 3077 int count; 3078 3079 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3080 vm_map_lock(map); 3081 VM_MAP_RANGE_CHECK(map, start, end); 3082 result = vm_map_delete(map, start, end, &count); 3083 vm_map_unlock(map); 3084 vm_map_entry_release(count); 3085 3086 return (result); 3087 } 3088 3089 /* 3090 * Assert that the target map allows the specified privilege on the 3091 * entire address region given. The entire region must be allocated. 3092 * 3093 * The caller must specify whether the vm_map is already locked or not. 
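 *
 * Illustrative call sketch (hedged example; "map", "start" and "end"
 * are assumed names), with the map not yet locked:
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *		return (EFAULT);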
3094 */ 3095 boolean_t 3096 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3097 vm_prot_t protection, boolean_t have_lock) 3098 { 3099 vm_map_entry_t entry; 3100 vm_map_entry_t tmp_entry; 3101 boolean_t result; 3102 3103 if (have_lock == FALSE) 3104 vm_map_lock_read(map); 3105 3106 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 3107 if (have_lock == FALSE) 3108 vm_map_unlock_read(map); 3109 return (FALSE); 3110 } 3111 entry = tmp_entry; 3112 3113 result = TRUE; 3114 while (start < end) { 3115 if (entry == &map->header) { 3116 result = FALSE; 3117 break; 3118 } 3119 /* 3120 * No holes allowed! 3121 */ 3122 3123 if (start < entry->start) { 3124 result = FALSE; 3125 break; 3126 } 3127 /* 3128 * Check protection associated with entry. 3129 */ 3130 3131 if ((entry->protection & protection) != protection) { 3132 result = FALSE; 3133 break; 3134 } 3135 /* go to next entry */ 3136 3137 start = entry->end; 3138 entry = entry->next; 3139 } 3140 if (have_lock == FALSE) 3141 vm_map_unlock_read(map); 3142 return (result); 3143 } 3144 3145 /* 3146 * If appropriate this function shadows the original object with a new object 3147 * and moves the VM pages from the original object to the new object. 3148 * The original object will also be collapsed, if possible. 3149 * 3150 * We can only do this for normal memory objects with a single mapping, and 3151 * it only makes sense to do it if there are 2 or more refs on the original 3152 * object. i.e. typically a memory object that has been extended into 3153 * multiple vm_map_entry's with non-overlapping ranges. 3154 * 3155 * This makes it easier to remove unused pages and keeps object inheritance 3156 * from being a negative impact on memory usage. 3157 * 3158 * On return the (possibly new) entry->object.vm_object will have an 3159 * additional ref on it for the caller to dispose of (usually by cloning 3160 * the vm_map_entry). The additional ref had to be done in this routine 3161 * to avoid racing a collapse. The object's ONEMAPPING flag will also be 3162 * cleared. 3163 * 3164 * The vm_map must be locked and its token held. 3165 */ 3166 static void 3167 vm_map_split(vm_map_entry_t entry) 3168 { 3169 /* OPTIMIZED */ 3170 vm_object_t oobject, nobject, bobject; 3171 vm_offset_t s, e; 3172 vm_page_t m; 3173 vm_pindex_t offidxstart, offidxend, idx; 3174 vm_size_t size; 3175 vm_ooffset_t offset; 3176 int useshadowlist; 3177 3178 /* 3179 * Optimize away object locks for vnode objects. Important exit/exec 3180 * critical path. 3181 * 3182 * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag 3183 * anyway. 3184 */ 3185 oobject = entry->object.vm_object; 3186 if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) { 3187 vm_object_reference_quick(oobject); 3188 vm_object_clear_flag(oobject, OBJ_ONEMAPPING); 3189 return; 3190 } 3191 3192 /* 3193 * Setup. Chain lock the original object throughout the entire 3194 * routine to prevent new page faults from occuring. 3195 * 3196 * XXX can madvise WILLNEED interfere with us too? 3197 */ 3198 vm_object_hold(oobject); 3199 vm_object_chain_acquire(oobject, 0); 3200 3201 /* 3202 * Original object cannot be split? Might have also changed state. 
3203 */ 3204 if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT && 3205 oobject->type != OBJT_SWAP)) { 3206 vm_object_chain_release(oobject); 3207 vm_object_reference_locked(oobject); 3208 vm_object_clear_flag(oobject, OBJ_ONEMAPPING); 3209 vm_object_drop(oobject); 3210 return; 3211 } 3212 3213 /* 3214 * Collapse original object with its backing store as an 3215 * optimization to reduce chain lengths when possible. 3216 * 3217 * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's 3218 * for oobject, so there's no point collapsing it. 3219 * 3220 * Then re-check whether the object can be split. 3221 */ 3222 vm_object_collapse(oobject, NULL); 3223 3224 if (oobject->ref_count <= 1 || 3225 (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) || 3226 (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) { 3227 vm_object_chain_release(oobject); 3228 vm_object_reference_locked(oobject); 3229 vm_object_clear_flag(oobject, OBJ_ONEMAPPING); 3230 vm_object_drop(oobject); 3231 return; 3232 } 3233 3234 /* 3235 * Acquire the chain lock on the backing object. 3236 * 3237 * Give bobject an additional ref count for when it will be shadowed 3238 * by nobject. 3239 */ 3240 useshadowlist = 0; 3241 if ((bobject = oobject->backing_object) != NULL) { 3242 if (bobject->type != OBJT_VNODE) { 3243 useshadowlist = 1; 3244 vm_object_hold(bobject); 3245 vm_object_chain_wait(bobject, 0); 3246 /* ref for shadowing below */ 3247 vm_object_reference_locked(bobject); 3248 vm_object_chain_acquire(bobject, 0); 3249 KKASSERT(oobject->backing_object == bobject); 3250 KKASSERT((bobject->flags & OBJ_DEAD) == 0); 3251 } else { 3252 /* 3253 * vnodes are not placed on the shadow list but 3254 * they still get another ref for the backing_object 3255 * reference. 3256 */ 3257 vm_object_reference_quick(bobject); 3258 } 3259 } 3260 3261 /* 3262 * Calculate the object page range and allocate the new object. 3263 */ 3264 offset = entry->offset; 3265 s = entry->start; 3266 e = entry->end; 3267 3268 offidxstart = OFF_TO_IDX(offset); 3269 offidxend = offidxstart + OFF_TO_IDX(e - s); 3270 size = offidxend - offidxstart; 3271 3272 switch(oobject->type) { 3273 case OBJT_DEFAULT: 3274 nobject = default_pager_alloc(NULL, IDX_TO_OFF(size), 3275 VM_PROT_ALL, 0); 3276 break; 3277 case OBJT_SWAP: 3278 nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size), 3279 VM_PROT_ALL, 0); 3280 break; 3281 default: 3282 /* not reached */ 3283 nobject = NULL; 3284 KKASSERT(0); 3285 } 3286 3287 if (nobject == NULL) { 3288 if (bobject) { 3289 if (useshadowlist) { 3290 vm_object_chain_release(bobject); 3291 vm_object_deallocate(bobject); 3292 vm_object_drop(bobject); 3293 } else { 3294 vm_object_deallocate(bobject); 3295 } 3296 } 3297 vm_object_chain_release(oobject); 3298 vm_object_reference_locked(oobject); 3299 vm_object_clear_flag(oobject, OBJ_ONEMAPPING); 3300 vm_object_drop(oobject); 3301 return; 3302 } 3303 3304 /* 3305 * The new object will replace entry->object.vm_object so it needs 3306 * a second reference (the caller expects an additional ref). 3307 */ 3308 vm_object_hold(nobject); 3309 vm_object_reference_locked(nobject); 3310 vm_object_chain_acquire(nobject, 0); 3311 3312 /* 3313 * nobject shadows bobject (oobject already shadows bobject). 3314 * 3315 * Adding an object to bobject's shadow list requires refing bobject 3316 * which we did above in the useshadowlist case.
3317 */ 3318 if (bobject) { 3319 nobject->backing_object_offset = 3320 oobject->backing_object_offset + IDX_TO_OFF(offidxstart); 3321 nobject->backing_object = bobject; 3322 if (useshadowlist) { 3323 bobject->shadow_count++; 3324 atomic_add_int(&bobject->generation, 1); 3325 LIST_INSERT_HEAD(&bobject->shadow_head, 3326 nobject, shadow_list); 3327 vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/ 3328 vm_object_chain_release(bobject); 3329 vm_object_drop(bobject); 3330 vm_object_set_flag(nobject, OBJ_ONSHADOW); 3331 } 3332 } 3333 3334 /* 3335 * Move the VM pages from oobject to nobject 3336 */ 3337 for (idx = 0; idx < size; idx++) { 3338 vm_page_t m; 3339 3340 m = vm_page_lookup_busy_wait(oobject, offidxstart + idx, 3341 TRUE, "vmpg"); 3342 if (m == NULL) 3343 continue; 3344 3345 /* 3346 * We must wait for pending I/O to complete before we can 3347 * rename the page. 3348 * 3349 * We do not have to VM_PROT_NONE the page as mappings should 3350 * not be changed by this operation. 3351 * 3352 * NOTE: The act of renaming a page updates chaingen for both 3353 * objects. 3354 */ 3355 vm_page_rename(m, nobject, idx); 3356 /* page automatically made dirty by rename and cache handled */ 3357 /* page remains busy */ 3358 } 3359 3360 if (oobject->type == OBJT_SWAP) { 3361 vm_object_pip_add(oobject, 1); 3362 /* 3363 * copy oobject pages into nobject and destroy unneeded 3364 * pages in shadow object. 3365 */ 3366 swap_pager_copy(oobject, nobject, offidxstart, 0); 3367 vm_object_pip_wakeup(oobject); 3368 } 3369 3370 /* 3371 * Wakeup the pages we played with. No spl protection is needed 3372 * for a simple wakeup. 3373 */ 3374 for (idx = 0; idx < size; idx++) { 3375 m = vm_page_lookup(nobject, idx); 3376 if (m) { 3377 KKASSERT(m->busy_count & PBUSY_LOCKED); 3378 vm_page_wakeup(m); 3379 } 3380 } 3381 entry->object.vm_object = nobject; 3382 entry->offset = 0LL; 3383 3384 /* 3385 * Cleanup 3386 * 3387 * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the 3388 * related pages were moved and are no longer applicable to the 3389 * original object. 3390 * 3391 * NOTE: Deallocate oobject (due to its entry->object.vm_object being 3392 * replaced by nobject). 3393 */ 3394 vm_object_chain_release(nobject); 3395 vm_object_drop(nobject); 3396 if (bobject && useshadowlist) { 3397 vm_object_chain_release(bobject); 3398 vm_object_drop(bobject); 3399 } 3400 vm_object_chain_release(oobject); 3401 /*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/ 3402 vm_object_deallocate_locked(oobject); 3403 vm_object_drop(oobject); 3404 } 3405 3406 /* 3407 * Copies the contents of the source entry to the destination 3408 * entry. The entries *must* be aligned properly. 3409 * 3410 * The vm_maps must be exclusively locked. 3411 * The vm_map's token must be held. 3412 * 3413 * Because the maps are locked no faults can be in progress during the 3414 * operation. 3415 */ 3416 static void 3417 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map, 3418 vm_map_entry_t src_entry, vm_map_entry_t dst_entry) 3419 { 3420 vm_object_t src_object; 3421 3422 if (dst_entry->maptype == VM_MAPTYPE_SUBMAP || 3423 dst_entry->maptype == VM_MAPTYPE_UKSMAP) 3424 return; 3425 if (src_entry->maptype == VM_MAPTYPE_SUBMAP || 3426 src_entry->maptype == VM_MAPTYPE_UKSMAP) 3427 return; 3428 3429 if (src_entry->wired_count == 0) { 3430 /* 3431 * If the source entry is marked needs_copy, it is already 3432 * write-protected. 
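 * Otherwise write access is removed from the source range below so
 * that the first subsequent write triggers a copy-on-write fault.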
3433 */ 3434 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 3435 pmap_protect(src_map->pmap, 3436 src_entry->start, 3437 src_entry->end, 3438 src_entry->protection & ~VM_PROT_WRITE); 3439 } 3440 3441 /* 3442 * Make a copy of the object. 3443 * 3444 * The object must be locked prior to checking the object type 3445 * and for the call to vm_object_collapse() and vm_map_split(). 3446 * We cannot use *_hold() here because the split code will 3447 * probably try to destroy the object. The lock is a pool 3448 * token and doesn't care. 3449 * 3450 * We must bump src_map->timestamp when setting 3451 * MAP_ENTRY_NEEDS_COPY to force any concurrent fault 3452 * to retry; otherwise the concurrent fault might improperly 3453 * install a RW pte when it's supposed to be a RO(COW) pte. 3454 * This race can occur because a vnode-backed fault may have 3455 * to temporarily release the map lock. 3456 */ 3457 if (src_entry->object.vm_object != NULL) { 3458 vm_map_split(src_entry); 3459 src_object = src_entry->object.vm_object; 3460 dst_entry->object.vm_object = src_object; 3461 src_entry->eflags |= (MAP_ENTRY_COW | 3462 MAP_ENTRY_NEEDS_COPY); 3463 dst_entry->eflags |= (MAP_ENTRY_COW | 3464 MAP_ENTRY_NEEDS_COPY); 3465 dst_entry->offset = src_entry->offset; 3466 ++src_map->timestamp; 3467 } else { 3468 dst_entry->object.vm_object = NULL; 3469 dst_entry->offset = 0; 3470 } 3471 3472 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 3473 dst_entry->end - dst_entry->start, src_entry->start); 3474 } else { 3475 /* 3476 * Of course, wired down pages can't be set copy-on-write. 3477 * Cause wired pages to be copied into the new map by 3478 * simulating faults (the new pages are pageable) 3479 */ 3480 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 3481 } 3482 } 3483 3484 /* 3485 * vmspace_fork: 3486 * Create a new process vmspace structure and vm_map 3487 * based on those of an existing process. The new map 3488 * is based on the old map, according to the inheritance 3489 * values on the regions in that map. 3490 * 3491 * The source map must not be locked. 3492 * No requirements.
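 *
 * Illustrative call sketch (hedged example, not a verbatim caller;
 * "p1" is an assumed name for the forking process):
 *
 *	vm2 = vmspace_fork(p1->p_vmspace);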
3493 */ 3494 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3495 vm_map_entry_t old_entry, int *countp); 3496 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3497 vm_map_entry_t old_entry, int *countp); 3498 3499 struct vmspace * 3500 vmspace_fork(struct vmspace *vm1) 3501 { 3502 struct vmspace *vm2; 3503 vm_map_t old_map = &vm1->vm_map; 3504 vm_map_t new_map; 3505 vm_map_entry_t old_entry; 3506 int count; 3507 3508 lwkt_gettoken(&vm1->vm_map.token); 3509 vm_map_lock(old_map); 3510 3511 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 3512 lwkt_gettoken(&vm2->vm_map.token); 3513 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 3514 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy); 3515 new_map = &vm2->vm_map; /* XXX */ 3516 new_map->timestamp = 1; 3517 3518 vm_map_lock(new_map); 3519 3520 count = 0; 3521 old_entry = old_map->header.next; 3522 while (old_entry != &old_map->header) { 3523 ++count; 3524 old_entry = old_entry->next; 3525 } 3526 3527 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT); 3528 3529 old_entry = old_map->header.next; 3530 while (old_entry != &old_map->header) { 3531 switch(old_entry->maptype) { 3532 case VM_MAPTYPE_SUBMAP: 3533 panic("vm_map_fork: encountered a submap"); 3534 break; 3535 case VM_MAPTYPE_UKSMAP: 3536 vmspace_fork_uksmap_entry(old_map, new_map, 3537 old_entry, &count); 3538 break; 3539 case VM_MAPTYPE_NORMAL: 3540 case VM_MAPTYPE_VPAGETABLE: 3541 vmspace_fork_normal_entry(old_map, new_map, 3542 old_entry, &count); 3543 break; 3544 } 3545 old_entry = old_entry->next; 3546 } 3547 3548 new_map->size = old_map->size; 3549 vm_map_unlock(old_map); 3550 vm_map_unlock(new_map); 3551 vm_map_entry_release(count); 3552 3553 lwkt_reltoken(&vm2->vm_map.token); 3554 lwkt_reltoken(&vm1->vm_map.token); 3555 3556 return (vm2); 3557 } 3558 3559 static 3560 void 3561 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3562 vm_map_entry_t old_entry, int *countp) 3563 { 3564 vm_map_entry_t new_entry; 3565 vm_object_t object; 3566 3567 switch (old_entry->inheritance) { 3568 case VM_INHERIT_NONE: 3569 break; 3570 case VM_INHERIT_SHARE: 3571 /* 3572 * Clone the entry, creating the shared object if 3573 * necessary. 3574 */ 3575 if (old_entry->object.vm_object == NULL) 3576 vm_map_entry_allocate_object(old_entry); 3577 3578 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3579 /* 3580 * Shadow a map_entry which needs a copy, 3581 * replacing its object with a new object 3582 * that points to the old one. Ask the 3583 * shadow code to automatically add an 3584 * additional ref. We can't do it afterwords 3585 * because we might race a collapse. The call 3586 * to vm_map_entry_shadow() will also clear 3587 * OBJ_ONEMAPPING. 3588 */ 3589 vm_map_entry_shadow(old_entry, 1); 3590 } else if (old_entry->object.vm_object) { 3591 /* 3592 * We will make a shared copy of the object, 3593 * and must clear OBJ_ONEMAPPING. 3594 * 3595 * Optimize vnode objects. OBJ_ONEMAPPING 3596 * is non-applicable but clear it anyway, 3597 * and its terminal so we don'th ave to deal 3598 * with chains. Reduces SMP conflicts. 3599 * 3600 * XXX assert that object.vm_object != NULL 3601 * since we allocate it above. 
3602 */ 3603 object = old_entry->object.vm_object; 3604 if (object->type == OBJT_VNODE) { 3605 vm_object_reference_quick(object); 3606 vm_object_clear_flag(object, 3607 OBJ_ONEMAPPING); 3608 } else { 3609 vm_object_hold(object); 3610 vm_object_chain_wait(object, 0); 3611 vm_object_reference_locked(object); 3612 vm_object_clear_flag(object, 3613 OBJ_ONEMAPPING); 3614 vm_object_drop(object); 3615 } 3616 } 3617 3618 /* 3619 * Clone the entry. We've already bumped the ref on 3620 * any vm_object. 3621 */ 3622 new_entry = vm_map_entry_create(new_map, countp); 3623 *new_entry = *old_entry; 3624 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3625 new_entry->wired_count = 0; 3626 3627 /* 3628 * Insert the entry into the new map -- we know we're 3629 * inserting at the end of the new map. 3630 */ 3631 3632 vm_map_entry_link(new_map, new_map->header.prev, 3633 new_entry); 3634 3635 /* 3636 * Update the physical map 3637 */ 3638 pmap_copy(new_map->pmap, old_map->pmap, 3639 new_entry->start, 3640 (old_entry->end - old_entry->start), 3641 old_entry->start); 3642 break; 3643 case VM_INHERIT_COPY: 3644 /* 3645 * Clone the entry and link into the map. 3646 */ 3647 new_entry = vm_map_entry_create(new_map, countp); 3648 *new_entry = *old_entry; 3649 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3650 new_entry->wired_count = 0; 3651 new_entry->object.vm_object = NULL; 3652 vm_map_entry_link(new_map, new_map->header.prev, 3653 new_entry); 3654 vm_map_copy_entry(old_map, new_map, old_entry, 3655 new_entry); 3656 break; 3657 } 3658 } 3659 3660 /* 3661 * When forking user-kernel shared maps, the map might change in the 3662 * child so do not try to copy the underlying pmap entries. 3663 */ 3664 static 3665 void 3666 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3667 vm_map_entry_t old_entry, int *countp) 3668 { 3669 vm_map_entry_t new_entry; 3670 3671 new_entry = vm_map_entry_create(new_map, countp); 3672 *new_entry = *old_entry; 3673 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3674 new_entry->wired_count = 0; 3675 vm_map_entry_link(new_map, new_map->header.prev, 3676 new_entry); 3677 } 3678 3679 /* 3680 * Create an auto-grow stack entry 3681 * 3682 * No requirements. 
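 *
 * Illustrative call sketch (hedged example; "map", "addrbos" and
 * "max_ssize" are assumed names, using the protections the comment in
 * the function body says are normally expected):
 *
 *	rv = vm_map_stack(map, addrbos, max_ssize, 0,
 *			  VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		...no space for the stack...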
3683 */ 3684 int 3685 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3686 int flags, vm_prot_t prot, vm_prot_t max, int cow) 3687 { 3688 vm_map_entry_t prev_entry; 3689 vm_map_entry_t new_stack_entry; 3690 vm_size_t init_ssize; 3691 int rv; 3692 int count; 3693 vm_offset_t tmpaddr; 3694 3695 cow |= MAP_IS_STACK; 3696 3697 if (max_ssize < sgrowsiz) 3698 init_ssize = max_ssize; 3699 else 3700 init_ssize = sgrowsiz; 3701 3702 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3703 vm_map_lock(map); 3704 3705 /* 3706 * Find space for the mapping 3707 */ 3708 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 3709 if (vm_map_findspace(map, addrbos, max_ssize, 1, 3710 flags, &tmpaddr)) { 3711 vm_map_unlock(map); 3712 vm_map_entry_release(count); 3713 return (KERN_NO_SPACE); 3714 } 3715 addrbos = tmpaddr; 3716 } 3717 3718 /* If addr is already mapped, no go */ 3719 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 3720 vm_map_unlock(map); 3721 vm_map_entry_release(count); 3722 return (KERN_NO_SPACE); 3723 } 3724 3725 #if 0 3726 /* XXX already handled by kern_mmap() */ 3727 /* If we would blow our VMEM resource limit, no go */ 3728 if (map->size + init_ssize > 3729 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3730 vm_map_unlock(map); 3731 vm_map_entry_release(count); 3732 return (KERN_NO_SPACE); 3733 } 3734 #endif 3735 3736 /* 3737 * If we can't accomodate max_ssize in the current mapping, 3738 * no go. However, we need to be aware that subsequent user 3739 * mappings might map into the space we have reserved for 3740 * stack, and currently this space is not protected. 3741 * 3742 * Hopefully we will at least detect this condition 3743 * when we try to grow the stack. 3744 */ 3745 if ((prev_entry->next != &map->header) && 3746 (prev_entry->next->start < addrbos + max_ssize)) { 3747 vm_map_unlock(map); 3748 vm_map_entry_release(count); 3749 return (KERN_NO_SPACE); 3750 } 3751 3752 /* 3753 * We initially map a stack of only init_ssize. We will 3754 * grow as needed later. Since this is to be a grow 3755 * down stack, we map at the top of the range. 3756 * 3757 * Note: we would normally expect prot and max to be 3758 * VM_PROT_ALL, and cow to be 0. Possibly we should 3759 * eliminate these as input parameters, and just 3760 * pass these values here in the insert call. 3761 */ 3762 rv = vm_map_insert(map, &count, NULL, NULL, 3763 0, addrbos + max_ssize - init_ssize, 3764 addrbos + max_ssize, 3765 VM_MAPTYPE_NORMAL, 3766 VM_SUBSYS_STACK, prot, max, cow); 3767 3768 /* Now set the avail_ssize amount */ 3769 if (rv == KERN_SUCCESS) { 3770 if (prev_entry != &map->header) 3771 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count); 3772 new_stack_entry = prev_entry->next; 3773 if (new_stack_entry->end != addrbos + max_ssize || 3774 new_stack_entry->start != addrbos + max_ssize - init_ssize) 3775 panic ("Bad entry start/end for new stack entry"); 3776 else 3777 new_stack_entry->aux.avail_ssize = max_ssize - init_ssize; 3778 } 3779 3780 vm_map_unlock(map); 3781 vm_map_entry_release(count); 3782 return (rv); 3783 } 3784 3785 /* 3786 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3787 * desired address is already mapped, or if we successfully grow 3788 * the stack. Also returns KERN_SUCCESS if addr is outside the 3789 * stack range (this is strange, but preserves compatibility with 3790 * the grow function in vm_machdep.c). 3791 * 3792 * No requirements. 
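 *
 * Illustrative call sketch (hedged example; "map" and "fault_addr" are
 * assumed names).  A fault path would typically try to grow the stack
 * before giving up on an unmapped address:
 *
 *	if (vm_map_growstack(map, fault_addr) != KERN_SUCCESS)
 *		return (KERN_FAILURE);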
3793 */ 3794 int 3795 vm_map_growstack (vm_map_t map, vm_offset_t addr) 3796 { 3797 vm_map_entry_t prev_entry; 3798 vm_map_entry_t stack_entry; 3799 vm_map_entry_t new_stack_entry; 3800 struct vmspace *vm; 3801 struct lwp *lp; 3802 struct proc *p; 3803 vm_offset_t end; 3804 int grow_amount; 3805 int rv = KERN_SUCCESS; 3806 int is_procstack; 3807 int use_read_lock = 1; 3808 int count; 3809 3810 /* 3811 * Find the vm 3812 */ 3813 lp = curthread->td_lwp; 3814 p = curthread->td_proc; 3815 KKASSERT(lp != NULL); 3816 vm = lp->lwp_vmspace; 3817 3818 /* 3819 * Growstack is only allowed on the current process. We disallow 3820 * other use cases, e.g. trying to access memory via procfs that 3821 * the stack hasn't grown into. 3822 */ 3823 if (map != &vm->vm_map) { 3824 return KERN_FAILURE; 3825 } 3826 3827 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3828 Retry: 3829 if (use_read_lock) 3830 vm_map_lock_read(map); 3831 else 3832 vm_map_lock(map); 3833 3834 /* If addr is already in the entry range, no need to grow. */ 3835 if (vm_map_lookup_entry(map, addr, &prev_entry)) 3836 goto done; 3837 3838 if ((stack_entry = prev_entry->next) == &map->header) 3839 goto done; 3840 if (prev_entry == &map->header) 3841 end = stack_entry->start - stack_entry->aux.avail_ssize; 3842 else 3843 end = prev_entry->end; 3844 3845 /* 3846 * This next test mimics the old grow function in vm_machdep.c. 3847 * It really doesn't quite make sense, but we do it anyway 3848 * for compatibility. 3849 * 3850 * If the stack is not growable, return success. This signals the 3851 * caller to proceed as it normally would with ordinary vm. 3852 */ 3853 if (stack_entry->aux.avail_ssize < 1 || 3854 addr >= stack_entry->start || 3855 addr < stack_entry->start - stack_entry->aux.avail_ssize) { 3856 goto done; 3857 } 3858 3859 /* Find the minimum grow amount */ 3860 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 3861 if (grow_amount > stack_entry->aux.avail_ssize) { 3862 rv = KERN_NO_SPACE; 3863 goto done; 3864 } 3865 3866 /* 3867 * If there is no longer enough space between the entries, 3868 * fail and adjust the available space. Note: this 3869 * should only happen if the user has mapped into the 3870 * stack area after the stack was created, and is 3871 * probably an error. 3872 * 3873 * This also effectively destroys any guard page the user 3874 * might have intended by limiting the stack size. 3875 */ 3876 if (grow_amount > stack_entry->start - end) { 3877 if (use_read_lock && vm_map_lock_upgrade(map)) { 3878 /* lost lock */ 3879 use_read_lock = 0; 3880 goto Retry; 3881 } 3882 use_read_lock = 0; 3883 stack_entry->aux.avail_ssize = stack_entry->start - end; 3884 rv = KERN_NO_SPACE; 3885 goto done; 3886 } 3887 3888 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 3889 3890 /* If this is the main process stack, see if we're over the 3891 * stack limit. 
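 * (Illustrative arithmetic with made-up numbers: with a current stack of
 * 512KB, i.e. ctob(vm->vm_ssize) == 524288, a grow_amount of 65536 and an
 * 8MB RLIMIT_STACK soft limit, 524288 + 65536 <= 8388608, so the growth
 * is allowed; if the sum exceeded the limit the code below would return
 * KERN_NO_SPACE.)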
3892 */ 3893 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 3894 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 3895 rv = KERN_NO_SPACE; 3896 goto done; 3897 } 3898 3899 /* Round up the grow amount modulo SGROWSIZ */ 3900 grow_amount = roundup (grow_amount, sgrowsiz); 3901 if (grow_amount > stack_entry->aux.avail_ssize) { 3902 grow_amount = stack_entry->aux.avail_ssize; 3903 } 3904 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 3905 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 3906 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 3907 ctob(vm->vm_ssize); 3908 } 3909 3910 /* If we would blow our VMEM resource limit, no go */ 3911 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3912 rv = KERN_NO_SPACE; 3913 goto done; 3914 } 3915 3916 if (use_read_lock && vm_map_lock_upgrade(map)) { 3917 /* lost lock */ 3918 use_read_lock = 0; 3919 goto Retry; 3920 } 3921 use_read_lock = 0; 3922 3923 /* Get the preliminary new entry start value */ 3924 addr = stack_entry->start - grow_amount; 3925 3926 /* If this puts us into the previous entry, cut back our growth 3927 * to the available space. Also, see the note above. 3928 */ 3929 if (addr < end) { 3930 stack_entry->aux.avail_ssize = stack_entry->start - end; 3931 addr = end; 3932 } 3933 3934 rv = vm_map_insert(map, &count, NULL, NULL, 3935 0, addr, stack_entry->start, 3936 VM_MAPTYPE_NORMAL, 3937 VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0); 3938 3939 /* Adjust the available stack space by the amount we grew. */ 3940 if (rv == KERN_SUCCESS) { 3941 if (prev_entry != &map->header) 3942 vm_map_clip_end(map, prev_entry, addr, &count); 3943 new_stack_entry = prev_entry->next; 3944 if (new_stack_entry->end != stack_entry->start || 3945 new_stack_entry->start != addr) 3946 panic ("Bad stack grow start/end in new stack entry"); 3947 else { 3948 new_stack_entry->aux.avail_ssize = 3949 stack_entry->aux.avail_ssize - 3950 (new_stack_entry->end - new_stack_entry->start); 3951 if (is_procstack) 3952 vm->vm_ssize += btoc(new_stack_entry->end - 3953 new_stack_entry->start); 3954 } 3955 3956 if (map->flags & MAP_WIREFUTURE) 3957 vm_map_unwire(map, new_stack_entry->start, 3958 new_stack_entry->end, FALSE); 3959 } 3960 3961 done: 3962 if (use_read_lock) 3963 vm_map_unlock_read(map); 3964 else 3965 vm_map_unlock(map); 3966 vm_map_entry_release(count); 3967 return (rv); 3968 } 3969 3970 /* 3971 * Unshare the specified VM space for exec. If other processes are 3972 * mapped to it, then create a new one. The new vmspace is null. 3973 * 3974 * No requirements. 3975 */ 3976 void 3977 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 3978 { 3979 struct vmspace *oldvmspace = p->p_vmspace; 3980 struct vmspace *newvmspace; 3981 vm_map_t map = &p->p_vmspace->vm_map; 3982 3983 /* 3984 * If we are execing a resident vmspace we fork it, otherwise 3985 * we create a new vmspace. Note that exitingcnt is not 3986 * copied to the new vmspace. 3987 */ 3988 lwkt_gettoken(&oldvmspace->vm_map.token); 3989 if (vmcopy) { 3990 newvmspace = vmspace_fork(vmcopy); 3991 lwkt_gettoken(&newvmspace->vm_map.token); 3992 } else { 3993 newvmspace = vmspace_alloc(map->min_offset, map->max_offset); 3994 lwkt_gettoken(&newvmspace->vm_map.token); 3995 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 3996 (caddr_t)&oldvmspace->vm_endcopy - 3997 (caddr_t)&oldvmspace->vm_startcopy); 3998 } 3999 4000 /* 4001 * Finish initializing the vmspace before assigning it 4002 * to the process. The vmspace will become the current vmspace 4003 * if p == curproc. 
4004 */ 4005 pmap_pinit2(vmspace_pmap(newvmspace)); 4006 pmap_replacevm(p, newvmspace, 0); 4007 lwkt_reltoken(&newvmspace->vm_map.token); 4008 lwkt_reltoken(&oldvmspace->vm_map.token); 4009 vmspace_rel(oldvmspace); 4010 } 4011 4012 /* 4013 * Unshare the specified VM space for forcing COW. This 4014 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4015 */ 4016 void 4017 vmspace_unshare(struct proc *p) 4018 { 4019 struct vmspace *oldvmspace = p->p_vmspace; 4020 struct vmspace *newvmspace; 4021 4022 lwkt_gettoken(&oldvmspace->vm_map.token); 4023 if (vmspace_getrefs(oldvmspace) == 1) { 4024 lwkt_reltoken(&oldvmspace->vm_map.token); 4025 return; 4026 } 4027 newvmspace = vmspace_fork(oldvmspace); 4028 lwkt_gettoken(&newvmspace->vm_map.token); 4029 pmap_pinit2(vmspace_pmap(newvmspace)); 4030 pmap_replacevm(p, newvmspace, 0); 4031 lwkt_reltoken(&newvmspace->vm_map.token); 4032 lwkt_reltoken(&oldvmspace->vm_map.token); 4033 vmspace_rel(oldvmspace); 4034 } 4035 4036 /* 4037 * vm_map_hint: return the beginning of the best area suitable for 4038 * creating a new mapping with "prot" protection. 4039 * 4040 * No requirements. 4041 */ 4042 vm_offset_t 4043 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot) 4044 { 4045 struct vmspace *vms = p->p_vmspace; 4046 4047 if (!randomize_mmap || addr != 0) { 4048 /* 4049 * Set a reasonable start point for the hint if it was 4050 * not specified or if it falls within the heap space. 4051 * Hinted mmap()s do not allocate out of the heap space. 4052 */ 4053 if (addr == 0 || 4054 (addr >= round_page((vm_offset_t)vms->vm_taddr) && 4055 addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) { 4056 addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz); 4057 } 4058 4059 return addr; 4060 } 4061 addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ; 4062 addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1); 4063 4064 return (round_page(addr)); 4065 } 4066 4067 /* 4068 * Finds the VM object, offset, and protection for a given virtual address 4069 * in the specified map, assuming a page fault of the type specified. 4070 * 4071 * Leaves the map in question locked for read; return values are guaranteed 4072 * until a vm_map_lookup_done call is performed. Note that the map argument 4073 * is in/out; the returned map must be used in the call to vm_map_lookup_done. 4074 * 4075 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make 4076 * that fast. 4077 * 4078 * If a lookup is requested with "write protection" specified, the map may 4079 * be changed to perform virtual copying operations, although the data 4080 * referenced will remain the same. 4081 * 4082 * No requirements. 4083 */ 4084 int 4085 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4086 vm_offset_t vaddr, 4087 vm_prot_t fault_typea, 4088 vm_map_entry_t *out_entry, /* OUT */ 4089 vm_object_t *object, /* OUT */ 4090 vm_pindex_t *pindex, /* OUT */ 4091 vm_prot_t *out_prot, /* OUT */ 4092 boolean_t *wired) /* OUT */ 4093 { 4094 vm_map_entry_t entry; 4095 vm_map_t map = *var_map; 4096 vm_prot_t prot; 4097 vm_prot_t fault_type = fault_typea; 4098 int use_read_lock = 1; 4099 int rv = KERN_SUCCESS; 4100 int count; 4101 4102 count = 0; 4103 if (vaddr < VM_MAX_USER_ADDRESS) 4104 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 4105 RetryLookup: 4106 if (use_read_lock) 4107 vm_map_lock_read(map); 4108 else 4109 vm_map_lock(map); 4110 4111 /* 4112 * Always do a full lookup. The hint doesn't get us much anymore 4113 * now that the map is RB'd. 
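 *
 * (Aside, a hedged usage sketch rather than code in this file: a caller
 * pairs this routine with vm_map_lookup_done(), roughly
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *			   &object, &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		... use object and pindex while the returned map is
 *		    read-locked ...
 *		vm_map_lookup_done(map, entry, 0);
 *	}
 *
 * where the locals mirror the OUT parameters declared above and the
 * count argument of 0 assumes the caller reserved no map entries of
 * its own.)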
4114 */ 4115 cpu_ccfence(); 4116 *out_entry = &map->header; 4117 *object = NULL; 4118 4119 { 4120 vm_map_entry_t tmp_entry; 4121 4122 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { 4123 rv = KERN_INVALID_ADDRESS; 4124 goto done; 4125 } 4126 entry = tmp_entry; 4127 *out_entry = entry; 4128 } 4129 4130 /* 4131 * Handle submaps. 4132 */ 4133 if (entry->maptype == VM_MAPTYPE_SUBMAP) { 4134 vm_map_t old_map = map; 4135 4136 *var_map = map = entry->object.sub_map; 4137 if (use_read_lock) 4138 vm_map_unlock_read(old_map); 4139 else 4140 vm_map_unlock(old_map); 4141 use_read_lock = 1; 4142 goto RetryLookup; 4143 } 4144 4145 /* 4146 * Check whether this task is allowed to have this page. 4147 * Note the special case for MAP_ENTRY_COW pages with an override. 4148 * This is to implement a forced COW for debuggers. 4149 */ 4150 if (fault_type & VM_PROT_OVERRIDE_WRITE) 4151 prot = entry->max_protection; 4152 else 4153 prot = entry->protection; 4154 4155 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 4156 if ((fault_type & prot) != fault_type) { 4157 rv = KERN_PROTECTION_FAILURE; 4158 goto done; 4159 } 4160 4161 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 4162 (entry->eflags & MAP_ENTRY_COW) && 4163 (fault_type & VM_PROT_WRITE) && 4164 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 4165 rv = KERN_PROTECTION_FAILURE; 4166 goto done; 4167 } 4168 4169 /* 4170 * If this page is not pageable, we have to get it for all possible 4171 * accesses. 4172 */ 4173 *wired = (entry->wired_count != 0); 4174 if (*wired) 4175 prot = fault_type = entry->protection; 4176 4177 /* 4178 * Virtual page tables may need to update the accessed (A) bit 4179 * in a page table entry. Upgrade the fault to a write fault for 4180 * that case if the map will support it. If the map does not support 4181 * it the page table entry simply will not be updated. 4182 */ 4183 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 4184 if (prot & VM_PROT_WRITE) 4185 fault_type |= VM_PROT_WRITE; 4186 } 4187 4188 if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace && 4189 pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) { 4190 if ((prot & VM_PROT_WRITE) == 0) 4191 fault_type |= VM_PROT_WRITE; 4192 } 4193 4194 /* 4195 * Only NORMAL and VPAGETABLE maps are object-based. UKSMAPs are not. 4196 */ 4197 if (entry->maptype != VM_MAPTYPE_NORMAL && 4198 entry->maptype != VM_MAPTYPE_VPAGETABLE) { 4199 *object = NULL; 4200 goto skip; 4201 } 4202 4203 /* 4204 * If the entry was copy-on-write, we either ... 4205 */ 4206 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4207 /* 4208 * If we want to write the page, we may as well handle that 4209 * now since we've got the map locked. 4210 * 4211 * If we don't need to write the page, we just demote the 4212 * permissions allowed. 4213 */ 4214 4215 if (fault_type & VM_PROT_WRITE) { 4216 /* 4217 * Not allowed if TDF_NOFAULT is set as the shadowing 4218 * operation can deadlock against the faulting 4219 * function due to the copy-on-write. 4220 */ 4221 if (curthread->td_flags & TDF_NOFAULT) { 4222 rv = KERN_FAILURE_NOFAULT; 4223 goto done; 4224 } 4225 4226 /* 4227 * Make a new object, and place it in the object 4228 * chain. Note that no new references have appeared 4229 * -- one just moved from the map to the new 4230 * object. 
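 *
 * Conceptually (an illustrative sketch, not literal structures):
 *
 *	entry -> object			(before the shadow)
 *	entry -> shadow -> object	(after vm_map_entry_shadow())
 *
 * Subsequent writes land in the shadow copy while the original
 * object remains shared.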
4231 */ 4232 if (use_read_lock && vm_map_lock_upgrade(map)) { 4233 /* lost lock */ 4234 use_read_lock = 0; 4235 goto RetryLookup; 4236 } 4237 use_read_lock = 0; 4238 4239 vm_map_entry_shadow(entry, 0); 4240 } else { 4241 /* 4242 * We're attempting to read a copy-on-write page -- 4243 * don't allow writes. 4244 */ 4245 prot &= ~VM_PROT_WRITE; 4246 } 4247 } 4248 4249 /* 4250 * Create an object if necessary. This code also handles 4251 * partitioning large entries to improve vm_fault performance. 4252 */ 4253 if (entry->object.vm_object == NULL && !map->system_map) { 4254 if (use_read_lock && vm_map_lock_upgrade(map)) { 4255 /* lost lock */ 4256 use_read_lock = 0; 4257 goto RetryLookup; 4258 } 4259 use_read_lock = 0; 4260 4261 /* 4262 * Partition large entries, giving each its own VM object, 4263 * to improve concurrent fault performance. This is only 4264 * applicable to userspace. 4265 */ 4266 if (vaddr < VM_MAX_USER_ADDRESS && 4267 entry->maptype == VM_MAPTYPE_NORMAL && 4268 ((entry->start ^ entry->end) & ~MAP_ENTRY_PARTITION_MASK)) { 4269 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 4270 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4271 ++mycpu->gd_cnt.v_intrans_coll; 4272 ++mycpu->gd_cnt.v_intrans_wait; 4273 vm_map_transition_wait(map); 4274 goto RetryLookup; 4275 } 4276 vm_map_entry_partition(map, entry, vaddr, &count); 4277 } 4278 vm_map_entry_allocate_object(entry); 4279 } 4280 4281 /* 4282 * Return the object/offset from this entry. If the entry was 4283 * copy-on-write or empty, it has been fixed up. 4284 */ 4285 *object = entry->object.vm_object; 4286 4287 skip: 4288 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4289 4290 /* 4291 * Return whether this is the only map sharing this data. On 4292 * success we return with a read lock held on the map. On failure 4293 * we return with the map unlocked. 4294 */ 4295 *out_prot = prot; 4296 done: 4297 if (rv == KERN_SUCCESS) { 4298 if (use_read_lock == 0) 4299 vm_map_lock_downgrade(map); 4300 } else if (use_read_lock) { 4301 vm_map_unlock_read(map); 4302 } else { 4303 vm_map_unlock(map); 4304 } 4305 if (vaddr < VM_MAX_USER_ADDRESS) 4306 vm_map_entry_release(count); 4307 4308 return (rv); 4309 } 4310 4311 /* 4312 * Releases locks acquired by a vm_map_lookup() 4313 * (according to the handle returned by that lookup). 4314 * 4315 * No other requirements. 4316 */ 4317 void 4318 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count) 4319 { 4320 /* 4321 * Unlock the main-level map 4322 */ 4323 vm_map_unlock_read(map); 4324 if (count) 4325 vm_map_entry_release(count); 4326 } 4327 4328 static void 4329 vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry, 4330 vm_offset_t vaddr, int *countp) 4331 { 4332 vaddr &= ~MAP_ENTRY_PARTITION_MASK; 4333 vm_map_clip_start(map, entry, vaddr, countp); 4334 vaddr += MAP_ENTRY_PARTITION_SIZE; 4335 vm_map_clip_end(map, entry, vaddr, countp); 4336 } 4337 4338 /* 4339 * Quick hack, needs some help to make it more SMP friendly. 
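 *
 * Hedged usage sketch (not code in this file): a caller interlocks an
 * address range around an operation and releases it afterwards, with the
 * ilock normally living on the caller's stack:
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the range [start, end) ...
 *	vm_map_deinterlock(map, &ilock);
 *
 * Overlapping interlocks block in vm_map_interlock() until the earlier
 * holder calls vm_map_deinterlock(), which wakes any waiters.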
4340 */ 4341 void 4342 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock, 4343 vm_offset_t ran_beg, vm_offset_t ran_end) 4344 { 4345 struct vm_map_ilock *scan; 4346 4347 ilock->ran_beg = ran_beg; 4348 ilock->ran_end = ran_end; 4349 ilock->flags = 0; 4350 4351 spin_lock(&map->ilock_spin); 4352 restart: 4353 for (scan = map->ilock_base; scan; scan = scan->next) { 4354 if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) { 4355 scan->flags |= ILOCK_WAITING; 4356 ssleep(scan, &map->ilock_spin, 0, "ilock", 0); 4357 goto restart; 4358 } 4359 } 4360 ilock->next = map->ilock_base; 4361 map->ilock_base = ilock; 4362 spin_unlock(&map->ilock_spin); 4363 } 4364 4365 void 4366 vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock) 4367 { 4368 struct vm_map_ilock *scan; 4369 struct vm_map_ilock **scanp; 4370 4371 spin_lock(&map->ilock_spin); 4372 scanp = &map->ilock_base; 4373 while ((scan = *scanp) != NULL) { 4374 if (scan == ilock) { 4375 *scanp = ilock->next; 4376 spin_unlock(&map->ilock_spin); 4377 if (ilock->flags & ILOCK_WAITING) 4378 wakeup(ilock); 4379 return; 4380 } 4381 scanp = &scan->next; 4382 } 4383 spin_unlock(&map->ilock_spin); 4384 panic("vm_map_deinterlock: missing ilock!"); 4385 } 4386 4387 #include "opt_ddb.h" 4388 #ifdef DDB 4389 #include <ddb/ddb.h> 4390 4391 /* 4392 * Debugging only 4393 */ 4394 DB_SHOW_COMMAND(map, vm_map_print) 4395 { 4396 static int nlines; 4397 /* XXX convert args. */ 4398 vm_map_t map = (vm_map_t)addr; 4399 boolean_t full = have_addr; 4400 4401 vm_map_entry_t entry; 4402 4403 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4404 (void *)map, 4405 (void *)map->pmap, map->nentries, map->timestamp); 4406 nlines++; 4407 4408 if (!full && db_indent) 4409 return; 4410 4411 db_indent += 2; 4412 for (entry = map->header.next; entry != &map->header; 4413 entry = entry->next) { 4414 db_iprintf("map entry %p: start=%p, end=%p\n", 4415 (void *)entry, (void *)entry->start, (void *)entry->end); 4416 nlines++; 4417 { 4418 static char *inheritance_name[4] = 4419 {"share", "copy", "none", "donate_copy"}; 4420 4421 db_iprintf(" prot=%x/%x/%s", 4422 entry->protection, 4423 entry->max_protection, 4424 inheritance_name[(int)(unsigned char) 4425 entry->inheritance]); 4426 if (entry->wired_count != 0) 4427 db_printf(", wired"); 4428 } 4429 switch(entry->maptype) { 4430 case VM_MAPTYPE_SUBMAP: 4431 /* XXX no %qd in kernel. Truncate entry->offset. */ 4432 db_printf(", share=%p, offset=0x%lx\n", 4433 (void *)entry->object.sub_map, 4434 (long)entry->offset); 4435 nlines++; 4436 if ((entry->prev == &map->header) || 4437 (entry->prev->object.sub_map != 4438 entry->object.sub_map)) { 4439 db_indent += 2; 4440 vm_map_print((db_expr_t)(intptr_t) 4441 entry->object.sub_map, 4442 full, 0, NULL); 4443 db_indent -= 2; 4444 } 4445 break; 4446 case VM_MAPTYPE_NORMAL: 4447 case VM_MAPTYPE_VPAGETABLE: 4448 /* XXX no %qd in kernel. Truncate entry->offset. */ 4449 db_printf(", object=%p, offset=0x%lx", 4450 (void *)entry->object.vm_object, 4451 (long)entry->offset); 4452 if (entry->eflags & MAP_ENTRY_COW) 4453 db_printf(", copy (%s)", 4454 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? 
"needed" : "done"); 4455 db_printf("\n"); 4456 nlines++; 4457 4458 if ((entry->prev == &map->header) || 4459 (entry->prev->object.vm_object != 4460 entry->object.vm_object)) { 4461 db_indent += 2; 4462 vm_object_print((db_expr_t)(intptr_t) 4463 entry->object.vm_object, 4464 full, 0, NULL); 4465 nlines += 4; 4466 db_indent -= 2; 4467 } 4468 break; 4469 case VM_MAPTYPE_UKSMAP: 4470 db_printf(", uksmap=%p, offset=0x%lx", 4471 (void *)entry->object.uksmap, 4472 (long)entry->offset); 4473 if (entry->eflags & MAP_ENTRY_COW) 4474 db_printf(", copy (%s)", 4475 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4476 db_printf("\n"); 4477 nlines++; 4478 break; 4479 default: 4480 break; 4481 } 4482 } 4483 db_indent -= 2; 4484 if (db_indent == 0) 4485 nlines = 0; 4486 } 4487 4488 /* 4489 * Debugging only 4490 */ 4491 DB_SHOW_COMMAND(procvm, procvm) 4492 { 4493 struct proc *p; 4494 4495 if (have_addr) { 4496 p = (struct proc *) addr; 4497 } else { 4498 p = curproc; 4499 } 4500 4501 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4502 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4503 (void *)vmspace_pmap(p->p_vmspace)); 4504 4505 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 4506 } 4507 4508 #endif /* DDB */ 4509