1 /* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2003-2017 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * This code is derived from software contributed to The DragonFly Project 10 * by Matthew Dillon <dillon@backplane.com> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $ 65 */ 66 67 /* 68 * Virtual memory mapping module. 
69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/kernel.h> 74 #include <sys/proc.h> 75 #include <sys/serialize.h> 76 #include <sys/lock.h> 77 #include <sys/vmmeter.h> 78 #include <sys/mman.h> 79 #include <sys/vnode.h> 80 #include <sys/resourcevar.h> 81 #include <sys/shm.h> 82 #include <sys/tree.h> 83 #include <sys/malloc.h> 84 #include <sys/objcache.h> 85 #include <sys/kern_syscall.h> 86 87 #include <vm/vm.h> 88 #include <vm/vm_param.h> 89 #include <vm/pmap.h> 90 #include <vm/vm_map.h> 91 #include <vm/vm_page.h> 92 #include <vm/vm_object.h> 93 #include <vm/vm_pager.h> 94 #include <vm/vm_kern.h> 95 #include <vm/vm_extern.h> 96 #include <vm/swap_pager.h> 97 #include <vm/vm_zone.h> 98 99 #include <sys/random.h> 100 #include <sys/sysctl.h> 101 #include <sys/spinlock.h> 102 103 #include <sys/thread2.h> 104 #include <sys/spinlock2.h> 105 106 /* 107 * Virtual memory maps provide for the mapping, protection, and sharing 108 * of virtual memory objects. In addition, this module provides for an 109 * efficient virtual copy of memory from one map to another. 110 * 111 * Synchronization is required prior to most operations. 112 * 113 * Maps consist of an ordered doubly-linked list of simple entries. 114 * A hint and a RB tree is used to speed-up lookups. 115 * 116 * Callers looking to modify maps specify start/end addresses which cause 117 * the related map entry to be clipped if necessary, and then later 118 * recombined if the pieces remained compatible. 119 * 120 * Virtual copy operations are performed by copying VM object references 121 * from one map to another, and then marking both regions as copy-on-write. 122 */ 123 static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags); 124 static void vmspace_dtor(void *obj, void *privdata); 125 static void vmspace_terminate(struct vmspace *vm, int final); 126 127 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore"); 128 MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry"); 129 static struct objcache *vmspace_cache; 130 131 /* 132 * per-cpu page table cross mappings are initialized in early boot 133 * and might require a considerable number of vm_map_entry structures. 134 */ 135 #define MAPENTRYBSP_CACHE (MAXCPU+1) 136 #define MAPENTRYAP_CACHE 8 137 138 /* 139 * Partioning threaded programs with large anonymous memory areas can 140 * improve concurrent fault performance. 
141 */ 142 #define MAP_ENTRY_PARTITION_SIZE ((vm_offset_t)(32 * 1024 * 1024)) 143 #define MAP_ENTRY_PARTITION_MASK (MAP_ENTRY_PARTITION_SIZE - 1) 144 145 #define VM_MAP_ENTRY_WITHIN_PARTITION(entry) \ 146 ((((entry)->start ^ (entry)->end) & ~MAP_ENTRY_PARTITION_MASK) == 0) 147 148 static struct vm_zone mapentzone_store; 149 static vm_zone_t mapentzone; 150 151 static struct vm_map_entry map_entry_init[MAX_MAPENT]; 152 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE]; 153 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE]; 154 155 static int randomize_mmap; 156 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0, 157 "Randomize mmap offsets"); 158 static int vm_map_relock_enable = 1; 159 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW, 160 &vm_map_relock_enable, 0, "insert pop pgtable optimization"); 161 static int vm_map_partition_enable = 1; 162 SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW, 163 &vm_map_partition_enable, 0, "Break up larger vm_map_entry's"); 164 static int vm_map_backing_limit = 5; 165 SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW, 166 &vm_map_backing_limit, 0, "ba.backing_ba link depth"); 167 static int vm_map_backing_shadow_test = 1; 168 SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW, 169 &vm_map_backing_shadow_test, 0, "ba.object shadow test"); 170 171 static void vmspace_drop_notoken(struct vmspace *vm); 172 static void vm_map_entry_shadow(vm_map_entry_t entry, int addref); 173 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *); 174 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *); 175 static void vm_map_entry_dispose_ba (vm_map_backing_t ba); 176 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 177 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 178 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *); 179 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t); 180 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t, 181 vm_map_entry_t); 182 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, 183 vm_offset_t start, vm_offset_t end, int *countp, int flags); 184 static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry, 185 vm_offset_t vaddr, int *countp); 186 187 /* 188 * Initialize the vm_map module. Must be called before any other vm_map 189 * routines. 190 * 191 * Map and entry structures are allocated from the general purpose 192 * memory pool with some exceptions: 193 * 194 * - The kernel map is allocated statically. 195 * - Initial kernel map entries are allocated out of a static pool. 196 * - We must set ZONE_SPECIAL here or the early boot code can get 197 * stuck if there are >63 cores. 198 * 199 * These restrictions are necessary since malloc() uses the 200 * maps and requires map entries. 201 * 202 * Called from the low level boot code only. 203 */ 204 void 205 vm_map_startup(void) 206 { 207 mapentzone = &mapentzone_store; 208 zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry), 209 map_entry_init, MAX_MAPENT); 210 mapentzone_store.zflags |= ZONE_SPECIAL; 211 } 212 213 /* 214 * Called prior to any vmspace allocations. 215 * 216 * Called from the low level boot code only. 
217 */ 218 void 219 vm_init2(void) 220 { 221 vmspace_cache = objcache_create_mbacked(M_VMSPACE, 222 sizeof(struct vmspace), 223 0, ncpus * 4, 224 vmspace_ctor, vmspace_dtor, 225 NULL); 226 zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL); 227 pmap_init2(); 228 vm_object_init2(); 229 } 230 231 /* 232 * objcache support. We leave the pmap root cached as long as possible 233 * for performance reasons. 234 */ 235 static 236 boolean_t 237 vmspace_ctor(void *obj, void *privdata, int ocflags) 238 { 239 struct vmspace *vm = obj; 240 241 bzero(vm, sizeof(*vm)); 242 vm->vm_refcnt = VM_REF_DELETED; 243 244 return 1; 245 } 246 247 static 248 void 249 vmspace_dtor(void *obj, void *privdata) 250 { 251 struct vmspace *vm = obj; 252 253 KKASSERT(vm->vm_refcnt == VM_REF_DELETED); 254 pmap_puninit(vmspace_pmap(vm)); 255 } 256 257 /* 258 * Red black tree functions 259 * 260 * The caller must hold the related map lock. 261 */ 262 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b); 263 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare); 264 265 /* a->start is address, and the only field which must be initialized */ 266 static int 267 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b) 268 { 269 if (a->start < b->start) 270 return(-1); 271 else if (a->start > b->start) 272 return(1); 273 return(0); 274 } 275 276 /* 277 * Initialize vmspace ref/hold counts vmspace0. There is a holdcnt for 278 * every refcnt. 279 */ 280 void 281 vmspace_initrefs(struct vmspace *vm) 282 { 283 vm->vm_refcnt = 1; 284 vm->vm_holdcnt = 1; 285 } 286 287 /* 288 * Allocate a vmspace structure, including a vm_map and pmap. 289 * Initialize numerous fields. While the initial allocation is zerod, 290 * subsequence reuse from the objcache leaves elements of the structure 291 * intact (particularly the pmap), so portions must be zerod. 292 * 293 * Returns a referenced vmspace. 294 * 295 * No requirements. 296 */ 297 struct vmspace * 298 vmspace_alloc(vm_offset_t min, vm_offset_t max) 299 { 300 struct vmspace *vm; 301 302 vm = objcache_get(vmspace_cache, M_WAITOK); 303 304 bzero(&vm->vm_startcopy, 305 (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy); 306 vm_map_init(&vm->vm_map, min, max, NULL); /* initializes token */ 307 308 /* 309 * NOTE: hold to acquires token for safety. 310 * 311 * On return vmspace is referenced (refs=1, hold=1). That is, 312 * each refcnt also has a holdcnt. There can be additional holds 313 * (holdcnt) above and beyond the refcnt. Finalization is handled in 314 * two stages, one on refs 1->0, and the the second on hold 1->0. 315 */ 316 KKASSERT(vm->vm_holdcnt == 0); 317 KKASSERT(vm->vm_refcnt == VM_REF_DELETED); 318 vmspace_initrefs(vm); 319 vmspace_hold(vm); 320 pmap_pinit(vmspace_pmap(vm)); /* (some fields reused) */ 321 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 322 vm->vm_shm = NULL; 323 vm->vm_flags = 0; 324 cpu_vmspace_alloc(vm); 325 vmspace_drop(vm); 326 327 return (vm); 328 } 329 330 /* 331 * NOTE: Can return 0 if the vmspace is exiting. 332 */ 333 int 334 vmspace_getrefs(struct vmspace *vm) 335 { 336 int32_t n; 337 338 n = vm->vm_refcnt; 339 cpu_ccfence(); 340 if (n & VM_REF_DELETED) 341 n = -1; 342 return n; 343 } 344 345 void 346 vmspace_hold(struct vmspace *vm) 347 { 348 atomic_add_int(&vm->vm_holdcnt, 1); 349 lwkt_gettoken(&vm->vm_map.token); 350 } 351 352 /* 353 * Drop with final termination interlock. 
354 */ 355 void 356 vmspace_drop(struct vmspace *vm) 357 { 358 lwkt_reltoken(&vm->vm_map.token); 359 vmspace_drop_notoken(vm); 360 } 361 362 static void 363 vmspace_drop_notoken(struct vmspace *vm) 364 { 365 if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) { 366 if (vm->vm_refcnt & VM_REF_DELETED) 367 vmspace_terminate(vm, 1); 368 } 369 } 370 371 /* 372 * A vmspace object must not be in a terminated state to be able to obtain 373 * additional refs on it. 374 * 375 * These are official references to the vmspace, the count is used to check 376 * for vmspace sharing. Foreign accessors should use 'hold' and not 'ref'. 377 * 378 * XXX we need to combine hold & ref together into one 64-bit field to allow 379 * holds to prevent stage-1 termination. 380 */ 381 void 382 vmspace_ref(struct vmspace *vm) 383 { 384 uint32_t n; 385 386 atomic_add_int(&vm->vm_holdcnt, 1); 387 n = atomic_fetchadd_int(&vm->vm_refcnt, 1); 388 KKASSERT((n & VM_REF_DELETED) == 0); 389 } 390 391 /* 392 * Release a ref on the vmspace. On the 1->0 transition we do stage-1 393 * termination of the vmspace. Then, on the final drop of the hold we 394 * will do stage-2 final termination. 395 */ 396 void 397 vmspace_rel(struct vmspace *vm) 398 { 399 uint32_t n; 400 401 /* 402 * Drop refs. Each ref also has a hold which is also dropped. 403 * 404 * When refs hits 0 compete to get the VM_REF_DELETED flag (hold 405 * prevent finalization) to start termination processing. 406 * Finalization occurs when the last hold count drops to 0. 407 */ 408 n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1; 409 while (n == 0) { 410 if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) { 411 vmspace_terminate(vm, 0); 412 break; 413 } 414 n = vm->vm_refcnt; 415 cpu_ccfence(); 416 } 417 vmspace_drop_notoken(vm); 418 } 419 420 /* 421 * This is called during exit indicating that the vmspace is no 422 * longer in used by an exiting process, but the process has not yet 423 * been reaped. 424 * 425 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt 426 * to prevent stage-2 until the process is reaped. Note hte order of 427 * operation, we must hold first. 428 * 429 * No requirements. 430 */ 431 void 432 vmspace_relexit(struct vmspace *vm) 433 { 434 atomic_add_int(&vm->vm_holdcnt, 1); 435 vmspace_rel(vm); 436 } 437 438 /* 439 * Called during reap to disconnect the remainder of the vmspace from 440 * the process. On the hold drop the vmspace termination is finalized. 441 * 442 * No requirements. 443 */ 444 void 445 vmspace_exitfree(struct proc *p) 446 { 447 struct vmspace *vm; 448 449 vm = p->p_vmspace; 450 p->p_vmspace = NULL; 451 vmspace_drop_notoken(vm); 452 } 453 454 /* 455 * Called in two cases: 456 * 457 * (1) When the last refcnt is dropped and the vmspace becomes inactive, 458 * called with final == 0. refcnt will be (u_int)-1 at this point, 459 * and holdcnt will still be non-zero. 460 * 461 * (2) When holdcnt becomes 0, called with final == 1. There should no 462 * longer be anyone with access to the vmspace. 463 * 464 * VMSPACE_EXIT1 flags the primary deactivation 465 * VMSPACE_EXIT2 flags the last reap 466 */ 467 static void 468 vmspace_terminate(struct vmspace *vm, int final) 469 { 470 int count; 471 472 lwkt_gettoken(&vm->vm_map.token); 473 if (final == 0) { 474 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0); 475 vm->vm_flags |= VMSPACE_EXIT1; 476 477 /* 478 * Get rid of most of the resources. Leave the kernel pmap 479 * intact. 
480 * 481 * If the pmap does not contain wired pages we can bulk-delete 482 * the pmap as a performance optimization before removing the 483 * related mappings. 484 * 485 * If the pmap contains wired pages we cannot do this 486 * pre-optimization because currently vm_fault_unwire() 487 * expects the pmap pages to exist and will not decrement 488 * p->wire_count if they do not. 489 */ 490 shmexit(vm); 491 if (vmspace_pmap(vm)->pm_stats.wired_count) { 492 vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS, 493 VM_MAX_USER_ADDRESS); 494 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS, 495 VM_MAX_USER_ADDRESS); 496 } else { 497 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS, 498 VM_MAX_USER_ADDRESS); 499 vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS, 500 VM_MAX_USER_ADDRESS); 501 } 502 lwkt_reltoken(&vm->vm_map.token); 503 } else { 504 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0); 505 KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0); 506 507 /* 508 * Get rid of remaining basic resources. 509 */ 510 vm->vm_flags |= VMSPACE_EXIT2; 511 shmexit(vm); 512 513 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 514 vm_map_lock(&vm->vm_map); 515 cpu_vmspace_free(vm); 516 517 /* 518 * Lock the map, to wait out all other references to it. 519 * Delete all of the mappings and pages they hold, then call 520 * the pmap module to reclaim anything left. 521 */ 522 vm_map_delete(&vm->vm_map, 523 vm_map_min(&vm->vm_map), 524 vm_map_max(&vm->vm_map), 525 &count); 526 vm_map_unlock(&vm->vm_map); 527 vm_map_entry_release(count); 528 529 pmap_release(vmspace_pmap(vm)); 530 lwkt_reltoken(&vm->vm_map.token); 531 objcache_put(vmspace_cache, vm); 532 } 533 } 534 535 /* 536 * Swap useage is determined by taking the proportional swap used by 537 * VM objects backing the VM map. To make up for fractional losses, 538 * if the VM object has any swap use at all the associated map entries 539 * count for at least 1 swap page. 540 * 541 * No requirements. 542 */ 543 vm_offset_t 544 vmspace_swap_count(struct vmspace *vm) 545 { 546 vm_map_t map = &vm->vm_map; 547 vm_map_entry_t cur; 548 vm_object_t object; 549 vm_offset_t count = 0; 550 vm_offset_t n; 551 552 vmspace_hold(vm); 553 554 RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) { 555 switch(cur->maptype) { 556 case VM_MAPTYPE_NORMAL: 557 case VM_MAPTYPE_VPAGETABLE: 558 if ((object = cur->ba.object) == NULL) 559 break; 560 if (object->swblock_count) { 561 n = (cur->end - cur->start) / PAGE_SIZE; 562 count += object->swblock_count * 563 SWAP_META_PAGES * n / object->size + 1; 564 } 565 break; 566 default: 567 break; 568 } 569 } 570 vmspace_drop(vm); 571 572 return(count); 573 } 574 575 /* 576 * Calculate the approximate number of anonymous pages in use by 577 * this vmspace. To make up for fractional losses, we count each 578 * VM object as having at least 1 anonymous page. 579 * 580 * No requirements. 
581 */ 582 vm_offset_t 583 vmspace_anonymous_count(struct vmspace *vm) 584 { 585 vm_map_t map = &vm->vm_map; 586 vm_map_entry_t cur; 587 vm_object_t object; 588 vm_offset_t count = 0; 589 590 vmspace_hold(vm); 591 RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) { 592 switch(cur->maptype) { 593 case VM_MAPTYPE_NORMAL: 594 case VM_MAPTYPE_VPAGETABLE: 595 if ((object = cur->ba.object) == NULL) 596 break; 597 if (object->type != OBJT_DEFAULT && 598 object->type != OBJT_SWAP) { 599 break; 600 } 601 count += object->resident_page_count; 602 break; 603 default: 604 break; 605 } 606 } 607 vmspace_drop(vm); 608 609 return(count); 610 } 611 612 /* 613 * Initialize an existing vm_map structure such as that in the vmspace 614 * structure. The pmap is initialized elsewhere. 615 * 616 * No requirements. 617 */ 618 void 619 vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr, 620 pmap_t pmap) 621 { 622 RB_INIT(&map->rb_root); 623 spin_init(&map->ilock_spin, "ilock"); 624 map->ilock_base = NULL; 625 map->nentries = 0; 626 map->size = 0; 627 map->system_map = 0; 628 vm_map_min(map) = min_addr; 629 vm_map_max(map) = max_addr; 630 map->pmap = pmap; 631 map->timestamp = 0; 632 map->flags = 0; 633 bzero(&map->freehint, sizeof(map->freehint)); 634 lwkt_token_init(&map->token, "vm_map"); 635 lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0); 636 } 637 638 /* 639 * Find the first possible free address for the specified request length. 640 * Returns 0 if we don't have one cached. 641 */ 642 static 643 vm_offset_t 644 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align) 645 { 646 vm_map_freehint_t *scan; 647 648 scan = &map->freehint[0]; 649 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 650 if (scan->length == length && scan->align == align) 651 return(scan->start); 652 ++scan; 653 } 654 return 0; 655 } 656 657 /* 658 * Unconditionally set the freehint. Called by vm_map_findspace() after 659 * it finds an address. This will help us iterate optimally on the next 660 * similar findspace. 661 */ 662 static 663 void 664 vm_map_freehint_update(vm_map_t map, vm_offset_t start, 665 vm_size_t length, vm_size_t align) 666 { 667 vm_map_freehint_t *scan; 668 669 scan = &map->freehint[0]; 670 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 671 if (scan->length == length && scan->align == align) { 672 scan->start = start; 673 return; 674 } 675 ++scan; 676 } 677 scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK]; 678 scan->start = start; 679 scan->align = align; 680 scan->length = length; 681 ++map->freehint_newindex; 682 } 683 684 /* 685 * Update any existing freehints (for any alignment), for the hole we just 686 * added. 687 */ 688 static 689 void 690 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length) 691 { 692 vm_map_freehint_t *scan; 693 694 scan = &map->freehint[0]; 695 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 696 if (scan->length <= length && scan->start > start) 697 scan->start = start; 698 ++scan; 699 } 700 } 701 702 /* 703 * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting 704 * object in the entry for COW faults. 705 * 706 * The entire chain including entry->ba (prior to inserting the fronting 707 * object) essentially becomes set in stone... elements of it can be paged 708 * in or out, but cannot be further modified. 709 * 710 * NOTE: If we do not optimize the backing chain then a unique copy is not 711 * needed. 
Note, however, that because portions of the chain are 712 * shared across pmaps we cannot make any changes to the vm_map_backing 713 * elements themselves. 714 * 715 * If the map segment is governed by a virtual page table then it is 716 * possible to address offsets beyond the mapped area. Just allocate 717 * a maximally sized object for this case. 718 * 719 * If addref is non-zero an additional reference is added to the returned 720 * entry. This mechanic exists because the additional reference might have 721 * to be added atomically and not after return to prevent a premature 722 * collapse. XXX currently there is no collapse code. 723 * 724 * The vm_map must be exclusively locked. 725 * No other requirements. 726 */ 727 static 728 void 729 vm_map_entry_shadow(vm_map_entry_t entry, int addref) 730 { 731 vm_map_backing_t ba; 732 vm_size_t length; 733 vm_object_t source; 734 vm_object_t result; 735 int drop_source; 736 737 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) 738 length = 0x7FFFFFFF; 739 else 740 length = atop(entry->end - entry->start); 741 ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */ 742 743 /* 744 * The ref on source is inherited when we move it into the ba. 745 */ 746 source = entry->ba.object; 747 748 /* 749 * Don't create the new object if the old object isn't shared. 750 * 751 * If addref is non-zero additional ref(s) are being added (probably 752 * for map entry fork purposes), so clear OBJ_ONEMAPPING. 753 * 754 * WARNING! Checking ref_count == 1 only works because we are testing 755 * the object embedded in the entry (entry->ba.object). 756 * This test DOES NOT WORK if checking an object hanging off 757 * the backing chain (entry->ba.backing_ba list) because the 758 * vm_map_backing might be shared, or part of a chain that 759 * is shared. Checking ba->refs is worthless. 760 */ 761 drop_source = 0; 762 if (source) { 763 if (source->type != OBJT_VNODE) { 764 vm_object_hold(source); 765 if (source->ref_count == 1 && 766 source->handle == NULL && 767 (source->type == OBJT_DEFAULT || 768 source->type == OBJT_SWAP)) { 769 if (addref) { 770 vm_object_reference_locked(source); 771 vm_object_clear_flag(source, 772 OBJ_ONEMAPPING); 773 } 774 vm_object_drop(source); 775 kfree(ba, M_MAP_BACKING); 776 goto done; 777 } 778 /*vm_object_reference_locked(source);*/ 779 vm_object_clear_flag(source, OBJ_ONEMAPPING); 780 drop_source = 1; /* drop source at end */ 781 } else { 782 /*vm_object_reference_quick(source);*/ 783 vm_object_clear_flag(source, OBJ_ONEMAPPING); 784 } 785 } 786 787 /* 788 * Allocate a new object with the given length. The new object 789 * is returned referenced but we may have to add another one. 790 * If we are adding a second reference we must clear OBJ_ONEMAPPING. 791 * (typically because the caller is about to clone a vm_map_entry). 792 * 793 * The source object currently has an extra reference to prevent 794 * collapses into it while we mess with its shadow list, which 795 * we will remove later in this routine. 796 * 797 * The target object may require a second reference if asked for one 798 * by the caller. 799 */ 800 result = vm_object_allocate(OBJT_DEFAULT, length); 801 if (result == NULL) 802 panic("vm_object_shadow: no object for shadowing"); 803 vm_object_hold(result); 804 if (addref) { 805 vm_object_reference_locked(result); 806 vm_object_clear_flag(result, OBJ_ONEMAPPING); 807 } 808 809 /* 810 * The new object shadows the source object. 
811 * 812 * Try to optimize the result object's page color when shadowing 813 * in order to maintain page coloring consistency in the combined 814 * shadowed object. 815 * 816 * The source object is moved to ba, retaining its existing ref-count. 817 * No additional ref is needed. 818 * 819 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS 820 */ 821 *ba = entry->ba; /* previous ba */ 822 ba->refs = 1; /* initialize ref count */ 823 entry->ba.object = result; /* new ba (at head of entry) */ 824 entry->ba.backing_ba = ba; 825 entry->ba.backing_count = ba->backing_count + 1; 826 entry->ba.offset = 0; 827 entry->ba.refs = 0; 828 829 if (source) { 830 #if 0 831 /* shadowing no longer messes with generation count */ 832 if (drop_source) { 833 atomic_add_int(&source->generation, 1); 834 vm_object_set_flag(result, OBJ_ONSHADOW); 835 } 836 #endif 837 /* cpu localization twist */ 838 result->pg_color = vm_quickcolor(); 839 } 840 841 /* 842 * Adjust the return storage. Drop the ref on source before 843 * returning. 844 */ 845 vm_object_drop(result); 846 if (source) { 847 if (drop_source) { 848 /*vm_object_deallocate_locked(source);*/ 849 vm_object_drop(source); 850 } else { 851 /*vm_object_deallocate(source);*/ 852 } 853 } 854 855 done: 856 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 857 } 858 859 /* 860 * Allocate an object for a vm_map_entry. 861 * 862 * Object allocation for anonymous mappings is defered as long as possible. 863 * This function is called when we can defer no longer, generally when a map 864 * entry might be split or forked or takes a page fault. 865 * 866 * If the map segment is governed by a virtual page table then it is 867 * possible to address offsets beyond the mapped area. Just allocate 868 * a maximally sized object for this case. 869 * 870 * The vm_map must be exclusively locked. 871 * No other requirements. 872 */ 873 void 874 vm_map_entry_allocate_object(vm_map_entry_t entry) 875 { 876 vm_object_t obj; 877 878 /* 879 * ba.offset is added cumulatively in the backing_ba scan, so we 880 * can noly reset it to zero if ba.backing_ba is NULL. We reset 881 * it to 0 only for debugging convenience. 882 * 883 * ba.offset cannot otherwise be modified because it effects 884 * the offsets for the entire backing_ba chain. 885 */ 886 if (entry->ba.backing_ba == NULL) 887 entry->ba.offset = 0; 888 889 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 890 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */ 891 } else { 892 obj = vm_object_allocate(OBJT_DEFAULT, 893 atop(entry->end - entry->start) + 894 entry->ba.offset); 895 } 896 entry->ba.object = obj; 897 } 898 899 /* 900 * Set an initial negative count so the first attempt to reserve 901 * space preloads a bunch of vm_map_entry's for this cpu. Also 902 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to 903 * map a new page for vm_map_entry structures. SMP systems are 904 * particularly sensitive. 905 * 906 * This routine is called in early boot so we cannot just call 907 * vm_map_entry_reserve(). 908 * 909 * Called from the low level boot code only (for each cpu) 910 * 911 * WARNING! Take care not to have too-big a static/BSS structure here 912 * as MAXCPU can be 256+, otherwise the loader's 64MB heap 913 * can get blown out by the kernel plus the initrd image. 
914 */ 915 void 916 vm_map_entry_reserve_cpu_init(globaldata_t gd) 917 { 918 vm_map_entry_t entry; 919 int count; 920 int i; 921 922 atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2); 923 if (gd->gd_cpuid == 0) { 924 entry = &cpu_map_entry_init_bsp[0]; 925 count = MAPENTRYBSP_CACHE; 926 } else { 927 entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0]; 928 count = MAPENTRYAP_CACHE; 929 } 930 for (i = 0; i < count; ++i, ++entry) { 931 MAPENT_FREELIST(entry) = gd->gd_vme_base; 932 gd->gd_vme_base = entry; 933 } 934 } 935 936 /* 937 * Reserves vm_map_entry structures so code later-on can manipulate 938 * map_entry structures within a locked map without blocking trying 939 * to allocate a new vm_map_entry. 940 * 941 * No requirements. 942 * 943 * WARNING! We must not decrement gd_vme_avail until after we have 944 * ensured that sufficient entries exist, otherwise we can 945 * get into an endless call recursion in the zalloc code 946 * itself. 947 */ 948 int 949 vm_map_entry_reserve(int count) 950 { 951 struct globaldata *gd = mycpu; 952 vm_map_entry_t entry; 953 954 /* 955 * Make sure we have enough structures in gd_vme_base to handle 956 * the reservation request. 957 * 958 * Use a critical section to protect against VM faults. It might 959 * not be needed, but we have to be careful here. 960 */ 961 if (gd->gd_vme_avail < count) { 962 crit_enter(); 963 while (gd->gd_vme_avail < count) { 964 entry = zalloc(mapentzone); 965 MAPENT_FREELIST(entry) = gd->gd_vme_base; 966 gd->gd_vme_base = entry; 967 atomic_add_int(&gd->gd_vme_avail, 1); 968 } 969 crit_exit(); 970 } 971 atomic_add_int(&gd->gd_vme_avail, -count); 972 973 return(count); 974 } 975 976 /* 977 * Releases previously reserved vm_map_entry structures that were not 978 * used. If we have too much junk in our per-cpu cache clean some of 979 * it out. 980 * 981 * No requirements. 982 */ 983 void 984 vm_map_entry_release(int count) 985 { 986 struct globaldata *gd = mycpu; 987 vm_map_entry_t entry; 988 vm_map_entry_t efree; 989 990 count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count; 991 if (gd->gd_vme_avail > MAP_RESERVE_SLOP) { 992 efree = NULL; 993 crit_enter(); 994 while (gd->gd_vme_avail > MAP_RESERVE_HYST) { 995 entry = gd->gd_vme_base; 996 KKASSERT(entry != NULL); 997 gd->gd_vme_base = MAPENT_FREELIST(entry); 998 atomic_add_int(&gd->gd_vme_avail, -1); 999 MAPENT_FREELIST(entry) = efree; 1000 efree = entry; 1001 } 1002 crit_exit(); 1003 while ((entry = efree) != NULL) { 1004 efree = MAPENT_FREELIST(efree); 1005 zfree(mapentzone, entry); 1006 } 1007 } 1008 } 1009 1010 /* 1011 * Reserve map entry structures for use in kernel_map itself. These 1012 * entries have *ALREADY* been reserved on a per-cpu basis when the map 1013 * was inited. This function is used by zalloc() to avoid a recursion 1014 * when zalloc() itself needs to allocate additional kernel memory. 1015 * 1016 * This function works like the normal reserve but does not load the 1017 * vm_map_entry cache (because that would result in an infinite 1018 * recursion). Note that gd_vme_avail may go negative. This is expected. 1019 * 1020 * Any caller of this function must be sure to renormalize after 1021 * potentially eating entries to ensure that the reserve supply 1022 * remains intact. 1023 * 1024 * No requirements. 
1025 */ 1026 int 1027 vm_map_entry_kreserve(int count) 1028 { 1029 struct globaldata *gd = mycpu; 1030 1031 atomic_add_int(&gd->gd_vme_avail, -count); 1032 KASSERT(gd->gd_vme_base != NULL, 1033 ("no reserved entries left, gd_vme_avail = %d", 1034 gd->gd_vme_avail)); 1035 return(count); 1036 } 1037 1038 /* 1039 * Release previously reserved map entries for kernel_map. We do not 1040 * attempt to clean up like the normal release function as this would 1041 * cause an unnecessary (but probably not fatal) deep procedure call. 1042 * 1043 * No requirements. 1044 */ 1045 void 1046 vm_map_entry_krelease(int count) 1047 { 1048 struct globaldata *gd = mycpu; 1049 1050 atomic_add_int(&gd->gd_vme_avail, count); 1051 } 1052 1053 /* 1054 * Allocates a VM map entry for insertion. No entry fields are filled in. 1055 * 1056 * The entries should have previously been reserved. The reservation count 1057 * is tracked in (*countp). 1058 * 1059 * No requirements. 1060 */ 1061 static vm_map_entry_t 1062 vm_map_entry_create(vm_map_t map, int *countp) 1063 { 1064 struct globaldata *gd = mycpu; 1065 vm_map_entry_t entry; 1066 1067 KKASSERT(*countp > 0); 1068 --*countp; 1069 crit_enter(); 1070 entry = gd->gd_vme_base; 1071 KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp)); 1072 gd->gd_vme_base = MAPENT_FREELIST(entry); 1073 crit_exit(); 1074 1075 return(entry); 1076 } 1077 1078 /* 1079 * Dispose of the dynamically allocated backing_ba chain associated 1080 * with a vm_map_entry. 1081 * 1082 * We decrement the (possibly shared) element and kfree() on the 1083 * 1->0 transition. We only iterate to the next backing_ba when 1084 * the previous one went through a 1->0 transition. 1085 */ 1086 static void 1087 vm_map_entry_dispose_ba(vm_map_backing_t ba) 1088 { 1089 vm_map_backing_t next; 1090 long refs; 1091 1092 while (ba) { 1093 refs = atomic_fetchadd_long(&ba->refs, -1); 1094 if (refs > 1) 1095 break; 1096 KKASSERT(refs == 1); /* transitioned 1->0 */ 1097 if (ba->object) 1098 vm_object_deallocate(ba->object); 1099 next = ba->backing_ba; 1100 kfree(ba, M_MAP_BACKING); 1101 ba = next; 1102 } 1103 } 1104 1105 /* 1106 * Dispose of a vm_map_entry that is no longer being referenced. 1107 * 1108 * No requirements. 1109 */ 1110 static void 1111 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp) 1112 { 1113 struct globaldata *gd = mycpu; 1114 1115 /* 1116 * Dispose of the base object and the backing link. 1117 */ 1118 switch(entry->maptype) { 1119 case VM_MAPTYPE_NORMAL: 1120 case VM_MAPTYPE_VPAGETABLE: 1121 case VM_MAPTYPE_SUBMAP: 1122 if (entry->ba.object) 1123 vm_object_deallocate(entry->ba.object); 1124 break; 1125 case VM_MAPTYPE_UKSMAP: 1126 /* XXX TODO */ 1127 break; 1128 default: 1129 break; 1130 } 1131 vm_map_entry_dispose_ba(entry->ba.backing_ba); 1132 1133 /* 1134 * Cleanup for safety. 1135 */ 1136 entry->ba.backing_ba = NULL; 1137 entry->ba.object = NULL; 1138 entry->ba.offset = 0; 1139 1140 ++*countp; 1141 crit_enter(); 1142 MAPENT_FREELIST(entry) = gd->gd_vme_base; 1143 gd->gd_vme_base = entry; 1144 crit_exit(); 1145 } 1146 1147 1148 /* 1149 * Insert/remove entries from maps. 1150 * 1151 * The related map must be exclusively locked. 1152 * The caller must hold map->token 1153 * No other requirements. 
1154 */ 1155 static __inline void 1156 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) 1157 { 1158 ASSERT_VM_MAP_LOCKED(map); 1159 1160 map->nentries++; 1161 if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry)) 1162 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry); 1163 } 1164 1165 static __inline void 1166 vm_map_entry_unlink(vm_map_t map, 1167 vm_map_entry_t entry) 1168 { 1169 ASSERT_VM_MAP_LOCKED(map); 1170 1171 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1172 panic("vm_map_entry_unlink: attempt to mess with " 1173 "locked entry! %p", entry); 1174 } 1175 vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry); 1176 map->nentries--; 1177 } 1178 1179 /* 1180 * Finds the map entry containing (or immediately preceding) the specified 1181 * address in the given map. The entry is returned in (*entry). 1182 * 1183 * The boolean result indicates whether the address is actually contained 1184 * in the map. 1185 * 1186 * The related map must be locked. 1187 * No other requirements. 1188 */ 1189 boolean_t 1190 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) 1191 { 1192 vm_map_entry_t tmp; 1193 vm_map_entry_t last; 1194 1195 ASSERT_VM_MAP_LOCKED(map); 1196 1197 /* 1198 * Locate the record from the top of the tree. 'last' tracks the 1199 * closest prior record and is returned if no match is found, which 1200 * in binary tree terms means tracking the most recent right-branch 1201 * taken. If there is no prior record, *entry is set to NULL. 1202 */ 1203 last = NULL; 1204 tmp = RB_ROOT(&map->rb_root); 1205 1206 while (tmp) { 1207 if (address >= tmp->start) { 1208 if (address < tmp->end) { 1209 *entry = tmp; 1210 return(TRUE); 1211 } 1212 last = tmp; 1213 tmp = RB_RIGHT(tmp, rb_entry); 1214 } else { 1215 tmp = RB_LEFT(tmp, rb_entry); 1216 } 1217 } 1218 *entry = last; 1219 return (FALSE); 1220 } 1221 1222 /* 1223 * Inserts the given whole VM object into the target map at the specified 1224 * address range. The object's size should match that of the address range. 1225 * 1226 * The map must be exclusively locked. 1227 * The object must be held. 1228 * The caller must have reserved sufficient vm_map_entry structures. 1229 * 1230 * If object is non-NULL, ref count must be bumped by caller prior to 1231 * making call to account for the new entry. 1232 */ 1233 int 1234 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux, 1235 vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, 1236 vm_maptype_t maptype, vm_subsys_t id, 1237 vm_prot_t prot, vm_prot_t max, int cow) 1238 { 1239 vm_map_entry_t new_entry; 1240 vm_map_entry_t prev_entry; 1241 vm_map_entry_t next; 1242 vm_map_entry_t temp_entry; 1243 vm_eflags_t protoeflags; 1244 vm_object_t object; 1245 int must_drop = 0; 1246 1247 if (maptype == VM_MAPTYPE_UKSMAP) 1248 object = NULL; 1249 else 1250 object = map_object; 1251 1252 ASSERT_VM_MAP_LOCKED(map); 1253 if (object) 1254 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1255 1256 /* 1257 * Check that the start and end points are not bogus. 1258 */ 1259 if ((start < vm_map_min(map)) || (end > vm_map_max(map)) || 1260 (start >= end)) { 1261 return (KERN_INVALID_ADDRESS); 1262 } 1263 1264 /* 1265 * Find the entry prior to the proposed starting address; if it's part 1266 * of an existing entry, this range is bogus. 1267 */ 1268 if (vm_map_lookup_entry(map, start, &temp_entry)) 1269 return (KERN_NO_SPACE); 1270 prev_entry = temp_entry; 1271 1272 /* 1273 * Assert that the next entry doesn't overlap the end point. 
1274 */ 1275 if (prev_entry) 1276 next = vm_map_rb_tree_RB_NEXT(prev_entry); 1277 else 1278 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 1279 if (next && next->start < end) 1280 return (KERN_NO_SPACE); 1281 1282 protoeflags = 0; 1283 1284 if (cow & MAP_COPY_ON_WRITE) 1285 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 1286 1287 if (cow & MAP_NOFAULT) { 1288 protoeflags |= MAP_ENTRY_NOFAULT; 1289 1290 KASSERT(object == NULL, 1291 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1292 } 1293 if (cow & MAP_DISABLE_SYNCER) 1294 protoeflags |= MAP_ENTRY_NOSYNC; 1295 if (cow & MAP_DISABLE_COREDUMP) 1296 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1297 if (cow & MAP_IS_STACK) 1298 protoeflags |= MAP_ENTRY_STACK; 1299 if (cow & MAP_IS_KSTACK) 1300 protoeflags |= MAP_ENTRY_KSTACK; 1301 1302 lwkt_gettoken(&map->token); 1303 1304 if (object) { 1305 /* 1306 * When object is non-NULL, it could be shared with another 1307 * process. We have to set or clear OBJ_ONEMAPPING 1308 * appropriately. 1309 * 1310 * NOTE: This flag is only applicable to DEFAULT and SWAP 1311 * objects and will already be clear in other types 1312 * of objects, so a shared object lock is ok for 1313 * VNODE objects. 1314 */ 1315 if (object->ref_count > 1) 1316 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1317 } 1318 else if (prev_entry && 1319 (prev_entry->eflags == protoeflags) && 1320 (prev_entry->end == start) && 1321 (prev_entry->wired_count == 0) && 1322 (prev_entry->id == id) && 1323 prev_entry->maptype == maptype && 1324 maptype == VM_MAPTYPE_NORMAL && 1325 prev_entry->ba.backing_ba == NULL && /* not backed */ 1326 ((prev_entry->ba.object == NULL) || 1327 vm_object_coalesce(prev_entry->ba.object, 1328 OFF_TO_IDX(prev_entry->ba.offset), 1329 (vm_size_t)(prev_entry->end - prev_entry->start), 1330 (vm_size_t)(end - prev_entry->end)))) { 1331 /* 1332 * We were able to extend the object. Determine if we 1333 * can extend the previous map entry to include the 1334 * new range as well. 1335 */ 1336 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 1337 (prev_entry->protection == prot) && 1338 (prev_entry->max_protection == max)) { 1339 map->size += (end - prev_entry->end); 1340 prev_entry->end = end; 1341 vm_map_simplify_entry(map, prev_entry, countp); 1342 lwkt_reltoken(&map->token); 1343 return (KERN_SUCCESS); 1344 } 1345 1346 /* 1347 * If we can extend the object but cannot extend the 1348 * map entry, we have to create a new map entry. We 1349 * must bump the ref count on the extended object to 1350 * account for it. object may be NULL. 1351 */ 1352 object = prev_entry->ba.object; 1353 offset = prev_entry->ba.offset + 1354 (prev_entry->end - prev_entry->start); 1355 if (object) { 1356 vm_object_hold(object); 1357 vm_object_lock_swap(); /* map->token order */ 1358 vm_object_reference_locked(object); 1359 map_object = object; 1360 must_drop = 1; 1361 } 1362 } 1363 1364 /* 1365 * NOTE: if conditionals fail, object can be NULL here. This occurs 1366 * in things like the buffer map where we manage kva but do not manage 1367 * backing objects. 
1368 */ 1369 1370 /* 1371 * Create a new entry 1372 */ 1373 new_entry = vm_map_entry_create(map, countp); 1374 new_entry->start = start; 1375 new_entry->end = end; 1376 new_entry->id = id; 1377 1378 new_entry->maptype = maptype; 1379 new_entry->eflags = protoeflags; 1380 new_entry->aux.master_pde = 0; /* in case size is different */ 1381 new_entry->aux.map_aux = map_aux; 1382 new_entry->ba.map_object = map_object; 1383 new_entry->ba.backing_ba = NULL; 1384 new_entry->ba.backing_count = 0; 1385 new_entry->ba.offset = offset; 1386 new_entry->ba.refs = 0; 1387 new_entry->ba.flags = 0; 1388 1389 new_entry->inheritance = VM_INHERIT_DEFAULT; 1390 new_entry->protection = prot; 1391 new_entry->max_protection = max; 1392 new_entry->wired_count = 0; 1393 1394 /* 1395 * Insert the new entry into the list 1396 */ 1397 1398 vm_map_entry_link(map, new_entry); 1399 map->size += new_entry->end - new_entry->start; 1400 1401 /* 1402 * Don't worry about updating freehint[] when inserting, allow 1403 * addresses to be lower than the actual first free spot. 1404 */ 1405 #if 0 1406 /* 1407 * Temporarily removed to avoid MAP_STACK panic, due to 1408 * MAP_STACK being a huge hack. Will be added back in 1409 * when MAP_STACK (and the user stack mapping) is fixed. 1410 */ 1411 /* 1412 * It may be possible to simplify the entry 1413 */ 1414 vm_map_simplify_entry(map, new_entry, countp); 1415 #endif 1416 1417 /* 1418 * Try to pre-populate the page table. Mappings governed by virtual 1419 * page tables cannot be prepopulated without a lot of work, so 1420 * don't try. 1421 */ 1422 if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) && 1423 maptype != VM_MAPTYPE_VPAGETABLE && 1424 maptype != VM_MAPTYPE_UKSMAP) { 1425 int dorelock = 0; 1426 if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) { 1427 dorelock = 1; 1428 vm_object_lock_swap(); 1429 vm_object_drop(object); 1430 } 1431 pmap_object_init_pt(map->pmap, start, prot, 1432 object, OFF_TO_IDX(offset), end - start, 1433 cow & MAP_PREFAULT_PARTIAL); 1434 if (dorelock) { 1435 vm_object_hold(object); 1436 vm_object_lock_swap(); 1437 } 1438 } 1439 lwkt_reltoken(&map->token); 1440 if (must_drop) 1441 vm_object_drop(object); 1442 1443 return (KERN_SUCCESS); 1444 } 1445 1446 /* 1447 * Find sufficient space for `length' bytes in the given map, starting at 1448 * `start'. Returns 0 on success, 1 on no space. 1449 * 1450 * This function will returned an arbitrarily aligned pointer. If no 1451 * particular alignment is required you should pass align as 1. Note that 1452 * the map may return PAGE_SIZE aligned pointers if all the lengths used in 1453 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align 1454 * argument. 1455 * 1456 * 'align' should be a power of 2 but is not required to be. 1457 * 1458 * The map must be exclusively locked. 1459 * No other requirements. 1460 */ 1461 int 1462 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1463 vm_size_t align, int flags, vm_offset_t *addr) 1464 { 1465 vm_map_entry_t entry; 1466 vm_map_entry_t tmp; 1467 vm_offset_t hole_start; 1468 vm_offset_t end; 1469 vm_offset_t align_mask; 1470 1471 if (start < vm_map_min(map)) 1472 start = vm_map_min(map); 1473 if (start > vm_map_max(map)) 1474 return (1); 1475 1476 /* 1477 * If the alignment is not a power of 2 we will have to use 1478 * a mod/division, set align_mask to a special value. 
1479 */ 1480 if ((align | (align - 1)) + 1 != (align << 1)) 1481 align_mask = (vm_offset_t)-1; 1482 else 1483 align_mask = align - 1; 1484 1485 /* 1486 * Use freehint to adjust the start point, hopefully reducing 1487 * the iteration to O(1). 1488 */ 1489 hole_start = vm_map_freehint_find(map, length, align); 1490 if (start < hole_start) 1491 start = hole_start; 1492 if (vm_map_lookup_entry(map, start, &tmp)) 1493 start = tmp->end; 1494 entry = tmp; /* may be NULL */ 1495 1496 /* 1497 * Look through the rest of the map, trying to fit a new region in the 1498 * gap between existing regions, or after the very last region. 1499 */ 1500 for (;;) { 1501 /* 1502 * Adjust the proposed start by the requested alignment, 1503 * be sure that we didn't wrap the address. 1504 */ 1505 if (align_mask == (vm_offset_t)-1) 1506 end = roundup(start, align); 1507 else 1508 end = (start + align_mask) & ~align_mask; 1509 if (end < start) 1510 return (1); 1511 start = end; 1512 1513 /* 1514 * Find the end of the proposed new region. Be sure we didn't 1515 * go beyond the end of the map, or wrap around the address. 1516 * Then check to see if this is the last entry or if the 1517 * proposed end fits in the gap between this and the next 1518 * entry. 1519 */ 1520 end = start + length; 1521 if (end > vm_map_max(map) || end < start) 1522 return (1); 1523 1524 /* 1525 * Locate the next entry, we can stop if this is the 1526 * last entry (we know we are in-bounds so that would 1527 * be a sucess). 1528 */ 1529 if (entry) 1530 entry = vm_map_rb_tree_RB_NEXT(entry); 1531 else 1532 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 1533 if (entry == NULL) 1534 break; 1535 1536 /* 1537 * Determine if the proposed area would overlap the 1538 * next entry. 1539 * 1540 * When matching against a STACK entry, only allow the 1541 * memory map to intrude on the ungrown portion of the 1542 * STACK entry when MAP_TRYFIXED is set. 1543 */ 1544 if (entry->start >= end) { 1545 if ((entry->eflags & MAP_ENTRY_STACK) == 0) 1546 break; 1547 if (flags & MAP_TRYFIXED) 1548 break; 1549 if (entry->start - entry->aux.avail_ssize >= end) 1550 break; 1551 } 1552 start = entry->end; 1553 } 1554 1555 /* 1556 * Update the freehint 1557 */ 1558 vm_map_freehint_update(map, start, length, align); 1559 1560 /* 1561 * Grow the kernel_map if necessary. pmap_growkernel() will panic 1562 * if it fails. The kernel_map is locked and nothing can steal 1563 * our address space if pmap_growkernel() blocks. 1564 * 1565 * NOTE: This may be unconditionally called for kldload areas on 1566 * x86_64 because these do not bump kernel_vm_end (which would 1567 * fill 128G worth of page tables!). Therefore we must not 1568 * retry. 1569 */ 1570 if (map == &kernel_map) { 1571 vm_offset_t kstop; 1572 1573 kstop = round_page(start + length); 1574 if (kstop > kernel_vm_end) 1575 pmap_growkernel(start, kstop); 1576 } 1577 *addr = start; 1578 return (0); 1579 } 1580 1581 /* 1582 * vm_map_find finds an unallocated region in the target address map with 1583 * the given length and allocates it. The search is defined to be first-fit 1584 * from the specified address; the region found is returned in the same 1585 * parameter. 1586 * 1587 * If object is non-NULL, ref count must be bumped by caller 1588 * prior to making call to account for the new entry. 1589 * 1590 * No requirements. This function will lock the map temporarily. 
1591 */ 1592 int 1593 vm_map_find(vm_map_t map, void *map_object, void *map_aux, 1594 vm_ooffset_t offset, vm_offset_t *addr, 1595 vm_size_t length, vm_size_t align, boolean_t fitit, 1596 vm_maptype_t maptype, vm_subsys_t id, 1597 vm_prot_t prot, vm_prot_t max, int cow) 1598 { 1599 vm_offset_t start; 1600 vm_object_t object; 1601 int result; 1602 int count; 1603 1604 if (maptype == VM_MAPTYPE_UKSMAP) 1605 object = NULL; 1606 else 1607 object = map_object; 1608 1609 start = *addr; 1610 1611 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1612 vm_map_lock(map); 1613 if (object) 1614 vm_object_hold_shared(object); 1615 if (fitit) { 1616 if (vm_map_findspace(map, start, length, align, 0, addr)) { 1617 if (object) 1618 vm_object_drop(object); 1619 vm_map_unlock(map); 1620 vm_map_entry_release(count); 1621 return (KERN_NO_SPACE); 1622 } 1623 start = *addr; 1624 } 1625 result = vm_map_insert(map, &count, map_object, map_aux, 1626 offset, start, start + length, 1627 maptype, id, prot, max, cow); 1628 if (object) 1629 vm_object_drop(object); 1630 vm_map_unlock(map); 1631 vm_map_entry_release(count); 1632 1633 return (result); 1634 } 1635 1636 /* 1637 * Simplify the given map entry by merging with either neighbor. This 1638 * routine also has the ability to merge with both neighbors. 1639 * 1640 * This routine guarentees that the passed entry remains valid (though 1641 * possibly extended). When merging, this routine may delete one or 1642 * both neighbors. No action is taken on entries which have their 1643 * in-transition flag set. 1644 * 1645 * The map must be exclusively locked. 1646 */ 1647 void 1648 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp) 1649 { 1650 vm_map_entry_t next, prev; 1651 vm_size_t prevsize, esize; 1652 1653 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1654 ++mycpu->gd_cnt.v_intrans_coll; 1655 return; 1656 } 1657 1658 if (entry->maptype == VM_MAPTYPE_SUBMAP) 1659 return; 1660 if (entry->maptype == VM_MAPTYPE_UKSMAP) 1661 return; 1662 1663 prev = vm_map_rb_tree_RB_PREV(entry); 1664 if (prev) { 1665 prevsize = prev->end - prev->start; 1666 if ( (prev->end == entry->start) && 1667 (prev->maptype == entry->maptype) && 1668 (prev->ba.object == entry->ba.object) && 1669 (prev->ba.backing_ba == entry->ba.backing_ba) && 1670 (!prev->ba.object || 1671 (prev->ba.offset + prevsize == entry->ba.offset)) && 1672 (prev->eflags == entry->eflags) && 1673 (prev->protection == entry->protection) && 1674 (prev->max_protection == entry->max_protection) && 1675 (prev->inheritance == entry->inheritance) && 1676 (prev->id == entry->id) && 1677 (prev->wired_count == entry->wired_count)) { 1678 vm_map_entry_unlink(map, prev); 1679 entry->start = prev->start; 1680 entry->ba.offset = prev->ba.offset; 1681 vm_map_entry_dispose(map, prev, countp); 1682 } 1683 } 1684 1685 next = vm_map_rb_tree_RB_NEXT(entry); 1686 if (next) { 1687 esize = entry->end - entry->start; 1688 if ((entry->end == next->start) && 1689 (next->maptype == entry->maptype) && 1690 (next->ba.object == entry->ba.object) && 1691 (prev->ba.backing_ba == entry->ba.backing_ba) && 1692 (!entry->ba.object || 1693 (entry->ba.offset + esize == next->ba.offset)) && 1694 (next->eflags == entry->eflags) && 1695 (next->protection == entry->protection) && 1696 (next->max_protection == entry->max_protection) && 1697 (next->inheritance == entry->inheritance) && 1698 (next->id == entry->id) && 1699 (next->wired_count == entry->wired_count)) { 1700 vm_map_entry_unlink(map, next); 1701 entry->end = next->end; 1702 
vm_map_entry_dispose(map, next, countp); 1703 } 1704 } 1705 } 1706 1707 /* 1708 * Asserts that the given entry begins at or after the specified address. 1709 * If necessary, it splits the entry into two. 1710 */ 1711 #define vm_map_clip_start(map, entry, startaddr, countp) \ 1712 { \ 1713 if (startaddr > entry->start) \ 1714 _vm_map_clip_start(map, entry, startaddr, countp); \ 1715 } 1716 1717 /* 1718 * This routine is called only when it is known that the entry must be split. 1719 * 1720 * The map must be exclusively locked. 1721 */ 1722 static void 1723 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, 1724 int *countp) 1725 { 1726 vm_map_entry_t new_entry; 1727 1728 /* 1729 * Split off the front portion -- note that we must insert the new 1730 * entry BEFORE this one, so that this entry has the specified 1731 * starting address. 1732 */ 1733 1734 vm_map_simplify_entry(map, entry, countp); 1735 1736 /* 1737 * If there is no object backing this entry, we might as well create 1738 * one now. If we defer it, an object can get created after the map 1739 * is clipped, and individual objects will be created for the split-up 1740 * map. This is a bit of a hack, but is also about the best place to 1741 * put this improvement. 1742 */ 1743 if (entry->ba.object == NULL && !map->system_map && 1744 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1745 vm_map_entry_allocate_object(entry); 1746 } 1747 1748 new_entry = vm_map_entry_create(map, countp); 1749 *new_entry = *entry; 1750 1751 new_entry->end = start; 1752 entry->ba.offset += (start - entry->start); 1753 entry->start = start; 1754 if (new_entry->ba.backing_ba) 1755 atomic_add_long(&new_entry->ba.backing_ba->refs, 1); 1756 1757 vm_map_entry_link(map, new_entry); 1758 1759 switch(entry->maptype) { 1760 case VM_MAPTYPE_NORMAL: 1761 case VM_MAPTYPE_VPAGETABLE: 1762 if (new_entry->ba.object) { 1763 vm_object_hold(new_entry->ba.object); 1764 vm_object_reference_locked(new_entry->ba.object); 1765 vm_object_drop(new_entry->ba.object); 1766 } 1767 break; 1768 default: 1769 break; 1770 } 1771 } 1772 1773 /* 1774 * Asserts that the given entry ends at or before the specified address. 1775 * If necessary, it splits the entry into two. 1776 * 1777 * The map must be exclusively locked. 1778 */ 1779 #define vm_map_clip_end(map, entry, endaddr, countp) \ 1780 { \ 1781 if (endaddr < entry->end) \ 1782 _vm_map_clip_end(map, entry, endaddr, countp); \ 1783 } 1784 1785 /* 1786 * This routine is called only when it is known that the entry must be split. 1787 * 1788 * The map must be exclusively locked. 1789 */ 1790 static void 1791 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, 1792 int *countp) 1793 { 1794 vm_map_entry_t new_entry; 1795 1796 /* 1797 * If there is no object backing this entry, we might as well create 1798 * one now. If we defer it, an object can get created after the map 1799 * is clipped, and individual objects will be created for the split-up 1800 * map. This is a bit of a hack, but is also about the best place to 1801 * put this improvement. 
1802 */ 1803 1804 if (entry->ba.object == NULL && !map->system_map && 1805 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1806 vm_map_entry_allocate_object(entry); 1807 } 1808 1809 /* 1810 * Create a new entry and insert it AFTER the specified entry 1811 */ 1812 new_entry = vm_map_entry_create(map, countp); 1813 *new_entry = *entry; 1814 1815 new_entry->start = entry->end = end; 1816 new_entry->ba.offset += (end - entry->start); 1817 if (new_entry->ba.backing_ba) 1818 atomic_add_long(&new_entry->ba.backing_ba->refs, 1); 1819 1820 vm_map_entry_link(map, new_entry); 1821 1822 switch(entry->maptype) { 1823 case VM_MAPTYPE_NORMAL: 1824 case VM_MAPTYPE_VPAGETABLE: 1825 if (new_entry->ba.object) { 1826 vm_object_hold(new_entry->ba.object); 1827 vm_object_reference_locked(new_entry->ba.object); 1828 vm_object_drop(new_entry->ba.object); 1829 } 1830 break; 1831 default: 1832 break; 1833 } 1834 } 1835 1836 /* 1837 * Asserts that the starting and ending region addresses fall within the 1838 * valid range for the map. 1839 */ 1840 #define VM_MAP_RANGE_CHECK(map, start, end) \ 1841 { \ 1842 if (start < vm_map_min(map)) \ 1843 start = vm_map_min(map); \ 1844 if (end > vm_map_max(map)) \ 1845 end = vm_map_max(map); \ 1846 if (start > end) \ 1847 start = end; \ 1848 } 1849 1850 /* 1851 * Used to block when an in-transition collison occurs. The map 1852 * is unlocked for the sleep and relocked before the return. 1853 */ 1854 void 1855 vm_map_transition_wait(vm_map_t map, int relock) 1856 { 1857 tsleep_interlock(map, 0); 1858 vm_map_unlock(map); 1859 tsleep(map, PINTERLOCKED, "vment", 0); 1860 if (relock) 1861 vm_map_lock(map); 1862 } 1863 1864 /* 1865 * When we do blocking operations with the map lock held it is 1866 * possible that a clip might have occured on our in-transit entry, 1867 * requiring an adjustment to the entry in our loop. These macros 1868 * help the pageable and clip_range code deal with the case. The 1869 * conditional costs virtually nothing if no clipping has occured. 1870 */ 1871 1872 #define CLIP_CHECK_BACK(entry, save_start) \ 1873 do { \ 1874 while (entry->start != save_start) { \ 1875 entry = vm_map_rb_tree_RB_PREV(entry); \ 1876 KASSERT(entry, ("bad entry clip")); \ 1877 } \ 1878 } while(0) 1879 1880 #define CLIP_CHECK_FWD(entry, save_end) \ 1881 do { \ 1882 while (entry->end != save_end) { \ 1883 entry = vm_map_rb_tree_RB_NEXT(entry); \ 1884 KASSERT(entry, ("bad entry clip")); \ 1885 } \ 1886 } while(0) 1887 1888 1889 /* 1890 * Clip the specified range and return the base entry. The 1891 * range may cover several entries starting at the returned base 1892 * and the first and last entry in the covering sequence will be 1893 * properly clipped to the requested start and end address. 1894 * 1895 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES 1896 * flag. 1897 * 1898 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries 1899 * covered by the requested range. 1900 * 1901 * The map must be exclusively locked on entry and will remain locked 1902 * on return. If no range exists or the range contains holes and you 1903 * specified that no holes were allowed, NULL will be returned. This 1904 * routine may temporarily unlock the map in order avoid a deadlock when 1905 * sleeping. 
1906 */ 1907 static 1908 vm_map_entry_t 1909 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 1910 int *countp, int flags) 1911 { 1912 vm_map_entry_t start_entry; 1913 vm_map_entry_t entry; 1914 vm_map_entry_t next; 1915 1916 /* 1917 * Locate the entry and effect initial clipping. The in-transition 1918 * case does not occur very often so do not try to optimize it. 1919 */ 1920 again: 1921 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) 1922 return (NULL); 1923 entry = start_entry; 1924 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1925 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1926 ++mycpu->gd_cnt.v_intrans_coll; 1927 ++mycpu->gd_cnt.v_intrans_wait; 1928 vm_map_transition_wait(map, 1); 1929 /* 1930 * entry and/or start_entry may have been clipped while 1931 * we slept, or may have gone away entirely. We have 1932 * to restart from the lookup. 1933 */ 1934 goto again; 1935 } 1936 1937 /* 1938 * Since we hold an exclusive map lock we do not have to restart 1939 * after clipping, even though clipping may block in zalloc. 1940 */ 1941 vm_map_clip_start(map, entry, start, countp); 1942 vm_map_clip_end(map, entry, end, countp); 1943 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1944 1945 /* 1946 * Scan entries covered by the range. When working on the next 1947 * entry a restart need only re-loop on the current entry which 1948 * we have already locked, since 'next' may have changed. Also, 1949 * even though entry is safe, it may have been clipped so we 1950 * have to iterate forwards through the clip after sleeping. 1951 */ 1952 for (;;) { 1953 next = vm_map_rb_tree_RB_NEXT(entry); 1954 if (next == NULL || next->start >= end) 1955 break; 1956 if (flags & MAP_CLIP_NO_HOLES) { 1957 if (next->start > entry->end) { 1958 vm_map_unclip_range(map, start_entry, 1959 start, entry->end, countp, flags); 1960 return(NULL); 1961 } 1962 } 1963 1964 if (next->eflags & MAP_ENTRY_IN_TRANSITION) { 1965 vm_offset_t save_end = entry->end; 1966 next->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1967 ++mycpu->gd_cnt.v_intrans_coll; 1968 ++mycpu->gd_cnt.v_intrans_wait; 1969 vm_map_transition_wait(map, 1); 1970 1971 /* 1972 * clips might have occured while we blocked. 1973 */ 1974 CLIP_CHECK_FWD(entry, save_end); 1975 CLIP_CHECK_BACK(start_entry, start); 1976 continue; 1977 } 1978 1979 /* 1980 * No restart necessary even though clip_end may block, we 1981 * are holding the map lock. 1982 */ 1983 vm_map_clip_end(map, next, end, countp); 1984 next->eflags |= MAP_ENTRY_IN_TRANSITION; 1985 entry = next; 1986 } 1987 if (flags & MAP_CLIP_NO_HOLES) { 1988 if (entry->end != end) { 1989 vm_map_unclip_range(map, start_entry, 1990 start, entry->end, countp, flags); 1991 return(NULL); 1992 } 1993 } 1994 return(start_entry); 1995 } 1996 1997 /* 1998 * Undo the effect of vm_map_clip_range(). You should pass the same 1999 * flags and the same range that you passed to vm_map_clip_range(). 2000 * This code will clear the in-transition flag on the entries and 2001 * wake up anyone waiting. This code will also simplify the sequence 2002 * and attempt to merge it with entries before and after the sequence. 2003 * 2004 * The map must be locked on entry and will remain locked on return. 2005 * 2006 * Note that you should also pass the start_entry returned by 2007 * vm_map_clip_range(). 
However, if you block between the two calls 2008 * with the map unlocked please be aware that the start_entry may 2009 * have been clipped and you may need to scan it backwards to find 2010 * the entry corresponding with the original start address. You are 2011 * responsible for this, vm_map_unclip_range() expects the correct 2012 * start_entry to be passed to it and will KASSERT otherwise. 2013 */ 2014 static 2015 void 2016 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry, 2017 vm_offset_t start, vm_offset_t end, 2018 int *countp, int flags) 2019 { 2020 vm_map_entry_t entry; 2021 2022 entry = start_entry; 2023 2024 KASSERT(entry->start == start, ("unclip_range: illegal base entry")); 2025 while (entry && entry->start < end) { 2026 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2027 ("in-transition flag not set during unclip on: %p", 2028 entry)); 2029 KASSERT(entry->end <= end, 2030 ("unclip_range: tail wasn't clipped")); 2031 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2032 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2033 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2034 wakeup(map); 2035 } 2036 entry = vm_map_rb_tree_RB_NEXT(entry); 2037 } 2038 2039 /* 2040 * Simplification does not block so there is no restart case. 2041 */ 2042 entry = start_entry; 2043 while (entry && entry->start < end) { 2044 vm_map_simplify_entry(map, entry, countp); 2045 entry = vm_map_rb_tree_RB_NEXT(entry); 2046 } 2047 } 2048 2049 /* 2050 * Mark the given range as handled by a subordinate map. 2051 * 2052 * This range must have been created with vm_map_find(), and no other 2053 * operations may have been performed on this range prior to calling 2054 * vm_map_submap(). 2055 * 2056 * Submappings cannot be removed. 2057 * 2058 * No requirements. 2059 */ 2060 int 2061 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) 2062 { 2063 vm_map_entry_t entry; 2064 int result = KERN_INVALID_ARGUMENT; 2065 int count; 2066 2067 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2068 vm_map_lock(map); 2069 2070 VM_MAP_RANGE_CHECK(map, start, end); 2071 2072 if (vm_map_lookup_entry(map, start, &entry)) { 2073 vm_map_clip_start(map, entry, start, &count); 2074 } else if (entry) { 2075 entry = vm_map_rb_tree_RB_NEXT(entry); 2076 } else { 2077 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2078 } 2079 2080 vm_map_clip_end(map, entry, end, &count); 2081 2082 if ((entry->start == start) && (entry->end == end) && 2083 ((entry->eflags & MAP_ENTRY_COW) == 0) && 2084 (entry->ba.object == NULL)) { 2085 entry->ba.sub_map = submap; 2086 entry->maptype = VM_MAPTYPE_SUBMAP; 2087 result = KERN_SUCCESS; 2088 } 2089 vm_map_unlock(map); 2090 vm_map_entry_release(count); 2091 2092 return (result); 2093 } 2094 2095 /* 2096 * Sets the protection of the specified address region in the target map. 2097 * If "set_max" is specified, the maximum protection is to be set; 2098 * otherwise, only the current protection is affected. 2099 * 2100 * The protection is not applicable to submaps, but is applicable to normal 2101 * maps and maps governed by virtual page tables. For example, when operating 2102 * on a virtual page table our protection basically controls how COW occurs 2103 * on the backing object, whereas the virtual page table abstraction itself 2104 * is an abstraction for userland. 2105 * 2106 * No requirements. 
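 *
 * Illustrative sketch (an assumption, not part of the original source):
 * an mprotect(2)-style caller would typically invoke this as
 *
 *	rv = vm_map_protect(&p->p_vmspace->vm_map,
 *			    trunc_page(addr),
 *			    round_page(addr + len),
 *			    prot, FALSE);
 *
 * passing FALSE to change only the current protection and TRUE to set
 * the maximum protection instead.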
2107 */ 2108 int 2109 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2110 vm_prot_t new_prot, boolean_t set_max) 2111 { 2112 vm_map_entry_t current; 2113 vm_map_entry_t entry; 2114 int count; 2115 2116 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2117 vm_map_lock(map); 2118 2119 VM_MAP_RANGE_CHECK(map, start, end); 2120 2121 if (vm_map_lookup_entry(map, start, &entry)) { 2122 vm_map_clip_start(map, entry, start, &count); 2123 } else if (entry) { 2124 entry = vm_map_rb_tree_RB_NEXT(entry); 2125 } else { 2126 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2127 } 2128 2129 /* 2130 * Make a first pass to check for protection violations. 2131 */ 2132 current = entry; 2133 while (current && current->start < end) { 2134 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2135 vm_map_unlock(map); 2136 vm_map_entry_release(count); 2137 return (KERN_INVALID_ARGUMENT); 2138 } 2139 if ((new_prot & current->max_protection) != new_prot) { 2140 vm_map_unlock(map); 2141 vm_map_entry_release(count); 2142 return (KERN_PROTECTION_FAILURE); 2143 } 2144 2145 /* 2146 * When making a SHARED+RW file mmap writable, update 2147 * v_lastwrite_ts. 2148 */ 2149 if (new_prot & PROT_WRITE && 2150 (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 2151 (current->maptype == VM_MAPTYPE_NORMAL || 2152 current->maptype == VM_MAPTYPE_VPAGETABLE) && 2153 current->ba.object && 2154 current->ba.object->type == OBJT_VNODE) { 2155 struct vnode *vp; 2156 2157 vp = current->ba.object->handle; 2158 if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT) == 0) { 2159 vfs_timestamp(&vp->v_lastwrite_ts); 2160 vsetflags(vp, VLASTWRITETS); 2161 vn_unlock(vp); 2162 } 2163 } 2164 current = vm_map_rb_tree_RB_NEXT(current); 2165 } 2166 2167 /* 2168 * Go back and fix up protections. [Note that clipping is not 2169 * necessary the second time.] 2170 */ 2171 current = entry; 2172 2173 while (current && current->start < end) { 2174 vm_prot_t old_prot; 2175 2176 vm_map_clip_end(map, current, end, &count); 2177 2178 old_prot = current->protection; 2179 if (set_max) { 2180 current->max_protection = new_prot; 2181 current->protection = new_prot & old_prot; 2182 } else { 2183 current->protection = new_prot; 2184 } 2185 2186 /* 2187 * Update physical map if necessary. Worry about copy-on-write 2188 * here -- CHECK THIS XXX 2189 */ 2190 if (current->protection != old_prot) { 2191 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2192 VM_PROT_ALL) 2193 2194 pmap_protect(map->pmap, current->start, 2195 current->end, 2196 current->protection & MASK(current)); 2197 #undef MASK 2198 } 2199 2200 vm_map_simplify_entry(map, current, &count); 2201 2202 current = vm_map_rb_tree_RB_NEXT(current); 2203 } 2204 vm_map_unlock(map); 2205 vm_map_entry_release(count); 2206 return (KERN_SUCCESS); 2207 } 2208 2209 /* 2210 * This routine traverses a processes map handling the madvise 2211 * system call. Advisories are classified as either those effecting 2212 * the vm_map_entry structure, or those effecting the underlying 2213 * objects. 2214 * 2215 * The <value> argument is used for extended madvise calls. 2216 * 2217 * No requirements. 
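 *
 * Illustrative sketch (an assumption, not part of the original source):
 * the madvise(2) path would typically call
 *
 *	error = vm_map_madvise(&p->p_vmspace->vm_map,
 *			       trunc_page(addr),
 *			       round_page(addr + len),
 *			       behav, 0);
 *
 * where the <value> argument only matters for extended advisories such
 * as MADV_SETMAP and may be passed as 0 otherwise.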
2218 */ 2219 int 2220 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, 2221 int behav, off_t value) 2222 { 2223 vm_map_entry_t current, entry; 2224 int modify_map = 0; 2225 int error = 0; 2226 int count; 2227 2228 /* 2229 * Some madvise calls directly modify the vm_map_entry, in which case 2230 * we need to use an exclusive lock on the map and we need to perform 2231 * various clipping operations. Otherwise we only need a read-lock 2232 * on the map. 2233 */ 2234 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2235 2236 switch(behav) { 2237 case MADV_NORMAL: 2238 case MADV_SEQUENTIAL: 2239 case MADV_RANDOM: 2240 case MADV_NOSYNC: 2241 case MADV_AUTOSYNC: 2242 case MADV_NOCORE: 2243 case MADV_CORE: 2244 case MADV_SETMAP: 2245 modify_map = 1; 2246 vm_map_lock(map); 2247 break; 2248 case MADV_INVAL: 2249 case MADV_WILLNEED: 2250 case MADV_DONTNEED: 2251 case MADV_FREE: 2252 vm_map_lock_read(map); 2253 break; 2254 default: 2255 vm_map_entry_release(count); 2256 return (EINVAL); 2257 } 2258 2259 /* 2260 * Locate starting entry and clip if necessary. 2261 */ 2262 2263 VM_MAP_RANGE_CHECK(map, start, end); 2264 2265 if (vm_map_lookup_entry(map, start, &entry)) { 2266 if (modify_map) 2267 vm_map_clip_start(map, entry, start, &count); 2268 } else if (entry) { 2269 entry = vm_map_rb_tree_RB_NEXT(entry); 2270 } else { 2271 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2272 } 2273 2274 if (modify_map) { 2275 /* 2276 * madvise behaviors that are implemented in the vm_map_entry. 2277 * 2278 * We clip the vm_map_entry so that behavioral changes are 2279 * limited to the specified address range. 2280 */ 2281 for (current = entry; 2282 current && current->start < end; 2283 current = vm_map_rb_tree_RB_NEXT(current)) { 2284 /* 2285 * Ignore submaps 2286 */ 2287 if (current->maptype == VM_MAPTYPE_SUBMAP) 2288 continue; 2289 2290 vm_map_clip_end(map, current, end, &count); 2291 2292 switch (behav) { 2293 case MADV_NORMAL: 2294 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2295 break; 2296 case MADV_SEQUENTIAL: 2297 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2298 break; 2299 case MADV_RANDOM: 2300 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2301 break; 2302 case MADV_NOSYNC: 2303 current->eflags |= MAP_ENTRY_NOSYNC; 2304 break; 2305 case MADV_AUTOSYNC: 2306 current->eflags &= ~MAP_ENTRY_NOSYNC; 2307 break; 2308 case MADV_NOCORE: 2309 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2310 break; 2311 case MADV_CORE: 2312 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2313 break; 2314 case MADV_SETMAP: 2315 /* 2316 * Set the page directory page for a map 2317 * governed by a virtual page table. Mark 2318 * the entry as being governed by a virtual 2319 * page table if it is not. 2320 * 2321 * XXX the page directory page is stored 2322 * in the avail_ssize field if the map_entry. 2323 * 2324 * XXX the map simplification code does not 2325 * compare this field so weird things may 2326 * happen if you do not apply this function 2327 * to the entire mapping governed by the 2328 * virtual page table. 2329 */ 2330 if (current->maptype != VM_MAPTYPE_VPAGETABLE) { 2331 error = EINVAL; 2332 break; 2333 } 2334 current->aux.master_pde = value; 2335 pmap_remove(map->pmap, 2336 current->start, current->end); 2337 break; 2338 case MADV_INVAL: 2339 /* 2340 * Invalidate the related pmap entries, used 2341 * to flush portions of the real kernel's 2342 * pmap when the caller has removed or 2343 * modified existing mappings in a virtual 2344 * page table. 
2345 * 2346 * (exclusive locked map version does not 2347 * need the range interlock). 2348 */ 2349 pmap_remove(map->pmap, 2350 current->start, current->end); 2351 break; 2352 default: 2353 error = EINVAL; 2354 break; 2355 } 2356 vm_map_simplify_entry(map, current, &count); 2357 } 2358 vm_map_unlock(map); 2359 } else { 2360 vm_pindex_t pindex; 2361 vm_pindex_t delta; 2362 2363 /* 2364 * madvise behaviors that are implemented in the underlying 2365 * vm_object. 2366 * 2367 * Since we don't clip the vm_map_entry, we have to clip 2368 * the vm_object pindex and count. 2369 * 2370 * NOTE! These functions are only supported on normal maps, 2371 * except MADV_INVAL which is also supported on 2372 * virtual page tables. 2373 * 2374 * NOTE! These functions only apply to the top-most object. 2375 * It is not applicable to backing objects. 2376 */ 2377 for (current = entry; 2378 current && current->start < end; 2379 current = vm_map_rb_tree_RB_NEXT(current)) { 2380 vm_offset_t useStart; 2381 2382 if (current->maptype != VM_MAPTYPE_NORMAL && 2383 (current->maptype != VM_MAPTYPE_VPAGETABLE || 2384 behav != MADV_INVAL)) { 2385 continue; 2386 } 2387 2388 pindex = OFF_TO_IDX(current->ba.offset); 2389 delta = atop(current->end - current->start); 2390 useStart = current->start; 2391 2392 if (current->start < start) { 2393 pindex += atop(start - current->start); 2394 delta -= atop(start - current->start); 2395 useStart = start; 2396 } 2397 if (current->end > end) 2398 delta -= atop(current->end - end); 2399 2400 if ((vm_spindex_t)delta <= 0) 2401 continue; 2402 2403 if (behav == MADV_INVAL) { 2404 /* 2405 * Invalidate the related pmap entries, used 2406 * to flush portions of the real kernel's 2407 * pmap when the caller has removed or 2408 * modified existing mappings in a virtual 2409 * page table. 2410 * 2411 * (shared locked map version needs the 2412 * interlock, see vm_fault()). 2413 */ 2414 struct vm_map_ilock ilock; 2415 2416 KASSERT(useStart >= VM_MIN_USER_ADDRESS && 2417 useStart + ptoa(delta) <= 2418 VM_MAX_USER_ADDRESS, 2419 ("Bad range %016jx-%016jx (%016jx)", 2420 useStart, useStart + ptoa(delta), 2421 delta)); 2422 vm_map_interlock(map, &ilock, 2423 useStart, 2424 useStart + ptoa(delta)); 2425 pmap_remove(map->pmap, 2426 useStart, 2427 useStart + ptoa(delta)); 2428 vm_map_deinterlock(map, &ilock); 2429 } else { 2430 vm_object_madvise(current->ba.object, 2431 pindex, delta, behav); 2432 } 2433 2434 /* 2435 * Try to populate the page table. Mappings governed 2436 * by virtual page tables cannot be pre-populated 2437 * without a lot of work so don't try. 2438 */ 2439 if (behav == MADV_WILLNEED && 2440 current->maptype != VM_MAPTYPE_VPAGETABLE) { 2441 pmap_object_init_pt( 2442 map->pmap, 2443 useStart, 2444 current->protection, 2445 current->ba.object, 2446 pindex, 2447 (count << PAGE_SHIFT), 2448 MAP_PREFAULT_MADVISE 2449 ); 2450 } 2451 } 2452 vm_map_unlock_read(map); 2453 } 2454 vm_map_entry_release(count); 2455 return(error); 2456 } 2457 2458 2459 /* 2460 * Sets the inheritance of the specified address range in the target map. 2461 * Inheritance affects how the map will be shared with child maps at the 2462 * time of vm_map_fork. 
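 *
 * Illustrative sketch (an assumption, not part of the original source):
 * the minherit(2) path would typically call
 *
 *	rv = vm_map_inherit(&p->p_vmspace->vm_map,
 *			    trunc_page(addr),
 *			    round_page(addr + len),
 *			    VM_INHERIT_SHARE);
 *
 * Only VM_INHERIT_NONE, VM_INHERIT_COPY and VM_INHERIT_SHARE are
 * accepted; any other value returns KERN_INVALID_ARGUMENT.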
2463 */ 2464 int 2465 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2466 vm_inherit_t new_inheritance) 2467 { 2468 vm_map_entry_t entry; 2469 vm_map_entry_t temp_entry; 2470 int count; 2471 2472 switch (new_inheritance) { 2473 case VM_INHERIT_NONE: 2474 case VM_INHERIT_COPY: 2475 case VM_INHERIT_SHARE: 2476 break; 2477 default: 2478 return (KERN_INVALID_ARGUMENT); 2479 } 2480 2481 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2482 vm_map_lock(map); 2483 2484 VM_MAP_RANGE_CHECK(map, start, end); 2485 2486 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2487 entry = temp_entry; 2488 vm_map_clip_start(map, entry, start, &count); 2489 } else if (temp_entry) { 2490 entry = vm_map_rb_tree_RB_NEXT(temp_entry); 2491 } else { 2492 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2493 } 2494 2495 while (entry && entry->start < end) { 2496 vm_map_clip_end(map, entry, end, &count); 2497 2498 entry->inheritance = new_inheritance; 2499 2500 vm_map_simplify_entry(map, entry, &count); 2501 2502 entry = vm_map_rb_tree_RB_NEXT(entry); 2503 } 2504 vm_map_unlock(map); 2505 vm_map_entry_release(count); 2506 return (KERN_SUCCESS); 2507 } 2508 2509 /* 2510 * Implement the semantics of mlock 2511 */ 2512 int 2513 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, 2514 boolean_t new_pageable) 2515 { 2516 vm_map_entry_t entry; 2517 vm_map_entry_t start_entry; 2518 vm_offset_t end; 2519 int rv = KERN_SUCCESS; 2520 int count; 2521 2522 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2523 vm_map_lock(map); 2524 VM_MAP_RANGE_CHECK(map, start, real_end); 2525 end = real_end; 2526 2527 start_entry = vm_map_clip_range(map, start, end, &count, 2528 MAP_CLIP_NO_HOLES); 2529 if (start_entry == NULL) { 2530 vm_map_unlock(map); 2531 vm_map_entry_release(count); 2532 return (KERN_INVALID_ADDRESS); 2533 } 2534 2535 if (new_pageable == 0) { 2536 entry = start_entry; 2537 while (entry && entry->start < end) { 2538 vm_offset_t save_start; 2539 vm_offset_t save_end; 2540 2541 /* 2542 * Already user wired or hard wired (trivial cases) 2543 */ 2544 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 2545 entry = vm_map_rb_tree_RB_NEXT(entry); 2546 continue; 2547 } 2548 if (entry->wired_count != 0) { 2549 entry->wired_count++; 2550 entry->eflags |= MAP_ENTRY_USER_WIRED; 2551 entry = vm_map_rb_tree_RB_NEXT(entry); 2552 continue; 2553 } 2554 2555 /* 2556 * A new wiring requires instantiation of appropriate 2557 * management structures and the faulting in of the 2558 * page. 2559 */ 2560 if (entry->maptype == VM_MAPTYPE_NORMAL || 2561 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2562 int copyflag = entry->eflags & 2563 MAP_ENTRY_NEEDS_COPY; 2564 if (copyflag && ((entry->protection & 2565 VM_PROT_WRITE) != 0)) { 2566 vm_map_entry_shadow(entry, 0); 2567 } else if (entry->ba.object == NULL && 2568 !map->system_map) { 2569 vm_map_entry_allocate_object(entry); 2570 } 2571 } 2572 entry->wired_count++; 2573 entry->eflags |= MAP_ENTRY_USER_WIRED; 2574 2575 /* 2576 * Now fault in the area. Note that vm_fault_wire() 2577 * may release the map lock temporarily, it will be 2578 * relocked on return. The in-transition 2579 * flag protects the entries. 
2580  */
2581 		save_start = entry->start;
2582 		save_end = entry->end;
2583 		rv = vm_fault_wire(map, entry, TRUE, 0);
2584 		if (rv) {
2585 			CLIP_CHECK_BACK(entry, save_start);
2586 			for (;;) {
2587 				KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2588 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2589 				entry->wired_count = 0;
2590 				if (entry->end == save_end)
2591 					break;
2592 				entry = vm_map_rb_tree_RB_NEXT(entry);
2593 				KASSERT(entry,
2594 					("bad entry clip during backout"));
2595 			}
2596 			end = save_start;	/* unwire the rest */
2597 			break;
2598 		}
2599 		/*
2600 		 * note that even though the entry might have been
2601 		 * clipped, the USER_WIRED flag we set prevents
2602 		 * duplication so we do not have to do a
2603 		 * clip check.
2604 		 */
2605 		entry = vm_map_rb_tree_RB_NEXT(entry);
2606 	}
2607 
2608 	/*
2609 	 * If we failed, fall through to the unwiring section to
2610 	 * unwire what we had wired so far.  'end' has already
2611 	 * been adjusted.
2612 	 */
2613 	if (rv)
2614 		new_pageable = 1;
2615 
2616 	/*
2617 	 * start_entry might have been clipped if we unlocked the
2618 	 * map and blocked.  No matter how clipped it has gotten
2619 	 * there should be a fragment that is on our start boundary.
2620 	 */
2621 	CLIP_CHECK_BACK(start_entry, start);
2622 	}
2623 
2624 	/*
2625 	 * Deal with the unwiring case.
2626 	 */
2627 	if (new_pageable) {
2628 		/*
2629 		 * This is the unwiring case.  We must first ensure that the
2630 		 * range to be unwired is really wired down.  We know there
2631 		 * are no holes.
2632 		 */
2633 		entry = start_entry;
2634 		while (entry && entry->start < end) {
2635 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2636 				rv = KERN_INVALID_ARGUMENT;
2637 				goto done;
2638 			}
2639 			KASSERT(entry->wired_count != 0,
2640 				("wired count was 0 with USER_WIRED set! %p",
2641 				 entry));
2642 			entry = vm_map_rb_tree_RB_NEXT(entry);
2643 		}
2644 
2645 		/*
2646 		 * Now decrement the wiring count for each region.  If a region
2647 		 * becomes completely unwired, unwire its physical pages and
2648 		 * mappings.
2649 		 */
2650 		/*
2651 		 * The map entries are processed in a second loop below.  Note
2652 		 * that the loop must re-seed the "entry" loop variable from
2653 		 * start_entry instead of reusing the value left over from the
2654 		 * verification pass above.  A historical version of this code
2655 		 * failed to do so: the second loop was never entered, the pages
2656 		 * backing the entries were never unwired, and wired pages were
2657 		 * leaked.  Re-seeding "entry" from start_entry here avoids
2658 		 * that problem.
2659 		 */
2660 		entry = start_entry;
2661 		while (entry && entry->start < end) {
2662 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2663 				("expected USER_WIRED on entry %p", entry));
2664 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2665 			entry->wired_count--;
2666 			if (entry->wired_count == 0)
2667 				vm_fault_unwire(map, entry);
2668 			entry = vm_map_rb_tree_RB_NEXT(entry);
2669 		}
2670 	}
2671 done:
2672 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2673 			    MAP_CLIP_NO_HOLES);
2674 	vm_map_unlock(map);
2675 	vm_map_entry_release(count);
2676 
2677 	return (rv);
2678 }
2679 
2680 /*
2681  * Sets the pageability of the specified address range in the target map.
2682  * Regions specified as not pageable require locked-down physical
2683  * memory and physical page maps.
2684  *
2685  * The map must not be locked, but a reference must remain to the map
2686  * throughout the call.
2687  *
2688  * This function may be called via the zalloc path and must properly
2689  * reserve map entries for kernel_map.
2690  *
2691  * No requirements.
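 *
 * Illustrative sketch (an assumption, not part of the original source):
 * wiring and later unwiring a range with the kernel reserve, as a
 * caller on the zalloc path would:
 *
 *	rv = vm_map_wire(map, addr, addr + size, KM_KRESERVE);
 *	...
 *	rv = vm_map_wire(map, addr, addr + size,
 *			 KM_KRESERVE | KM_PAGEABLE);
 *
 * KM_PAGEABLE selects the unwiring path; KM_KRESERVE only changes
 * which map-entry reserve is used for the internal bookkeeping.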
2692 */ 2693 int 2694 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags) 2695 { 2696 vm_map_entry_t entry; 2697 vm_map_entry_t start_entry; 2698 vm_offset_t end; 2699 int rv = KERN_SUCCESS; 2700 int count; 2701 2702 if (kmflags & KM_KRESERVE) 2703 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 2704 else 2705 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2706 vm_map_lock(map); 2707 VM_MAP_RANGE_CHECK(map, start, real_end); 2708 end = real_end; 2709 2710 start_entry = vm_map_clip_range(map, start, end, &count, 2711 MAP_CLIP_NO_HOLES); 2712 if (start_entry == NULL) { 2713 vm_map_unlock(map); 2714 rv = KERN_INVALID_ADDRESS; 2715 goto failure; 2716 } 2717 if ((kmflags & KM_PAGEABLE) == 0) { 2718 /* 2719 * Wiring. 2720 * 2721 * 1. Holding the write lock, we create any shadow or zero-fill 2722 * objects that need to be created. Then we clip each map 2723 * entry to the region to be wired and increment its wiring 2724 * count. We create objects before clipping the map entries 2725 * to avoid object proliferation. 2726 * 2727 * 2. We downgrade to a read lock, and call vm_fault_wire to 2728 * fault in the pages for any newly wired area (wired_count is 2729 * 1). 2730 * 2731 * Downgrading to a read lock for vm_fault_wire avoids a 2732 * possible deadlock with another process that may have faulted 2733 * on one of the pages to be wired (it would mark the page busy, 2734 * blocking us, then in turn block on the map lock that we 2735 * hold). Because of problems in the recursive lock package, 2736 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 2737 * any actions that require the write lock must be done 2738 * beforehand. Because we keep the read lock on the map, the 2739 * copy-on-write status of the entries we modify here cannot 2740 * change. 2741 */ 2742 entry = start_entry; 2743 while (entry && entry->start < end) { 2744 /* 2745 * Trivial case if the entry is already wired 2746 */ 2747 if (entry->wired_count) { 2748 entry->wired_count++; 2749 entry = vm_map_rb_tree_RB_NEXT(entry); 2750 continue; 2751 } 2752 2753 /* 2754 * The entry is being newly wired, we have to setup 2755 * appropriate management structures. A shadow 2756 * object is required for a copy-on-write region, 2757 * or a normal object for a zero-fill region. We 2758 * do not have to do this for entries that point to sub 2759 * maps because we won't hold the lock on the sub map. 2760 */ 2761 if (entry->maptype == VM_MAPTYPE_NORMAL || 2762 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2763 int copyflag = entry->eflags & 2764 MAP_ENTRY_NEEDS_COPY; 2765 if (copyflag && ((entry->protection & 2766 VM_PROT_WRITE) != 0)) { 2767 vm_map_entry_shadow(entry, 0); 2768 } else if (entry->ba.object == NULL && 2769 !map->system_map) { 2770 vm_map_entry_allocate_object(entry); 2771 } 2772 } 2773 entry->wired_count++; 2774 entry = vm_map_rb_tree_RB_NEXT(entry); 2775 } 2776 2777 /* 2778 * Pass 2. 2779 */ 2780 2781 /* 2782 * HACK HACK HACK HACK 2783 * 2784 * vm_fault_wire() temporarily unlocks the map to avoid 2785 * deadlocks. The in-transition flag from vm_map_clip_range 2786 * call should protect us from changes while the map is 2787 * unlocked. T 2788 * 2789 * NOTE: Previously this comment stated that clipping might 2790 * still occur while the entry is unlocked, but from 2791 * what I can tell it actually cannot. 2792 * 2793 * It is unclear whether the CLIP_CHECK_*() calls 2794 * are still needed but we keep them in anyway. 
2795 * 2796 * HACK HACK HACK HACK 2797 */ 2798 2799 entry = start_entry; 2800 while (entry && entry->start < end) { 2801 /* 2802 * If vm_fault_wire fails for any page we need to undo 2803 * what has been done. We decrement the wiring count 2804 * for those pages which have not yet been wired (now) 2805 * and unwire those that have (later). 2806 */ 2807 vm_offset_t save_start = entry->start; 2808 vm_offset_t save_end = entry->end; 2809 2810 if (entry->wired_count == 1) 2811 rv = vm_fault_wire(map, entry, FALSE, kmflags); 2812 if (rv) { 2813 CLIP_CHECK_BACK(entry, save_start); 2814 for (;;) { 2815 KASSERT(entry->wired_count == 1, 2816 ("wired_count changed unexpectedly")); 2817 entry->wired_count = 0; 2818 if (entry->end == save_end) 2819 break; 2820 entry = vm_map_rb_tree_RB_NEXT(entry); 2821 KASSERT(entry, 2822 ("bad entry clip during backout")); 2823 } 2824 end = save_start; 2825 break; 2826 } 2827 CLIP_CHECK_FWD(entry, save_end); 2828 entry = vm_map_rb_tree_RB_NEXT(entry); 2829 } 2830 2831 /* 2832 * If a failure occured undo everything by falling through 2833 * to the unwiring code. 'end' has already been adjusted 2834 * appropriately. 2835 */ 2836 if (rv) 2837 kmflags |= KM_PAGEABLE; 2838 2839 /* 2840 * start_entry is still IN_TRANSITION but may have been 2841 * clipped since vm_fault_wire() unlocks and relocks the 2842 * map. No matter how clipped it has gotten there should 2843 * be a fragment that is on our start boundary. 2844 */ 2845 CLIP_CHECK_BACK(start_entry, start); 2846 } 2847 2848 if (kmflags & KM_PAGEABLE) { 2849 /* 2850 * This is the unwiring case. We must first ensure that the 2851 * range to be unwired is really wired down. We know there 2852 * are no holes. 2853 */ 2854 entry = start_entry; 2855 while (entry && entry->start < end) { 2856 if (entry->wired_count == 0) { 2857 rv = KERN_INVALID_ARGUMENT; 2858 goto done; 2859 } 2860 entry = vm_map_rb_tree_RB_NEXT(entry); 2861 } 2862 2863 /* 2864 * Now decrement the wiring count for each region. If a region 2865 * becomes completely unwired, unwire its physical pages and 2866 * mappings. 2867 */ 2868 entry = start_entry; 2869 while (entry && entry->start < end) { 2870 entry->wired_count--; 2871 if (entry->wired_count == 0) 2872 vm_fault_unwire(map, entry); 2873 entry = vm_map_rb_tree_RB_NEXT(entry); 2874 } 2875 } 2876 done: 2877 vm_map_unclip_range(map, start_entry, start, real_end, 2878 &count, MAP_CLIP_NO_HOLES); 2879 vm_map_unlock(map); 2880 failure: 2881 if (kmflags & KM_KRESERVE) 2882 vm_map_entry_krelease(count); 2883 else 2884 vm_map_entry_release(count); 2885 return (rv); 2886 } 2887 2888 /* 2889 * Mark a newly allocated address range as wired but do not fault in 2890 * the pages. The caller is expected to load the pages into the object. 2891 * 2892 * The map must be locked on entry and will remain locked on return. 2893 * No other requirements. 2894 */ 2895 void 2896 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, 2897 int *countp) 2898 { 2899 vm_map_entry_t scan; 2900 vm_map_entry_t entry; 2901 2902 entry = vm_map_clip_range(map, addr, addr + size, 2903 countp, MAP_CLIP_NO_HOLES); 2904 scan = entry; 2905 while (scan && scan->start < addr + size) { 2906 KKASSERT(scan->wired_count == 0); 2907 scan->wired_count = 1; 2908 scan = vm_map_rb_tree_RB_NEXT(scan); 2909 } 2910 vm_map_unclip_range(map, entry, addr, addr + size, 2911 countp, MAP_CLIP_NO_HOLES); 2912 } 2913 2914 /* 2915 * Push any dirty cached pages in the address range to their pager. 
2916  * If syncio is TRUE, dirty pages are written synchronously.
2917  * If invalidate is TRUE, any cached pages are freed as well.
2918  *
2919  * This routine is called by sys_msync()
2920  *
2921  * Returns an error if any part of the specified range is not mapped.
2922  *
2923  * No requirements.
2924  */
2925 int
2926 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2927 	     boolean_t syncio, boolean_t invalidate)
2928 {
2929 	vm_map_entry_t current;
2930 	vm_map_entry_t next;
2931 	vm_map_entry_t entry;
2932 	vm_map_backing_t ba;
2933 	vm_size_t size;
2934 	vm_object_t object;
2935 	vm_ooffset_t offset;
2936 
2937 	vm_map_lock_read(map);
2938 	VM_MAP_RANGE_CHECK(map, start, end);
2939 	if (!vm_map_lookup_entry(map, start, &entry)) {
2940 		vm_map_unlock_read(map);
2941 		return (KERN_INVALID_ADDRESS);
2942 	}
2943 	lwkt_gettoken(&map->token);
2944 
2945 	/*
2946 	 * Make a first pass to check for holes.
2947 	 */
2948 	current = entry;
2949 	while (current && current->start < end) {
2950 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2951 			lwkt_reltoken(&map->token);
2952 			vm_map_unlock_read(map);
2953 			return (KERN_INVALID_ARGUMENT);
2954 		}
2955 		next = vm_map_rb_tree_RB_NEXT(current);
2956 		if (end > current->end &&
2957 		    (next == NULL ||
2958 		     current->end != next->start)) {
2959 			lwkt_reltoken(&map->token);
2960 			vm_map_unlock_read(map);
2961 			return (KERN_INVALID_ADDRESS);
2962 		}
2963 		current = next;
2964 	}
2965 
2966 	if (invalidate)
2967 		pmap_remove(vm_map_pmap(map), start, end);
2968 
2969 	/*
2970 	 * Make a second pass, cleaning/uncaching pages from the indicated
2971 	 * objects as we go.
2972 	 */
2973 	current = entry;
2974 	while (current && current->start < end) {
2975 		offset = current->ba.offset + (start - current->start);
2976 		size = (end <= current->end ? end : current->end) - start;
2977 
2978 		switch(current->maptype) {
2979 		case VM_MAPTYPE_SUBMAP:
2980 		{
2981 			vm_map_t smap;
2982 			vm_map_entry_t tentry;
2983 			vm_size_t tsize;
2984 
2985 			smap = current->ba.sub_map;
2986 			vm_map_lock_read(smap);
2987 			vm_map_lookup_entry(smap, offset, &tentry);
2988 			if (tentry == NULL) {
2989 				tsize = vm_map_max(smap) - offset;
2990 				ba = NULL;
2991 				offset = 0 + (offset - vm_map_min(smap));
2992 			} else {
2993 				tsize = tentry->end - offset;
2994 				ba = &tentry->ba;
2995 				offset = tentry->ba.offset +
2996 					 (offset - tentry->start);
2997 			}
2998 			vm_map_unlock_read(smap);
2999 			if (tsize < size)
3000 				size = tsize;
3001 			break;
3002 		}
3003 		case VM_MAPTYPE_NORMAL:
3004 		case VM_MAPTYPE_VPAGETABLE:
3005 			ba = &current->ba;
3006 			break;
3007 		default:
3008 			ba = NULL;
3009 			break;
3010 		}
3011 		if (ba) {
3012 			object = ba->object;
3013 			if (object)
3014 				vm_object_hold(object);
3015 		} else {
3016 			object = NULL;
3017 		}
3018 
3019 		/*
3020 		 * Note that there is absolutely no sense in writing out
3021 		 * anonymous objects, so we track down the vnode object
3022 		 * to write out.
3023 		 * We invalidate (remove) all pages from the address space
3024 		 * anyway, for semantic correctness.
3025 		 *
3026 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
3027 		 * may start out with a NULL object.
3028 		 *
3029 		 * XXX do we really want to stop at the first backing store
3030 		 * here if there are more?
XXX 3031 */ 3032 if (ba) { 3033 vm_object_t tobj; 3034 3035 tobj = object; 3036 while (ba->backing_ba != NULL) { 3037 ba = ba->backing_ba; 3038 offset += ba->offset; 3039 tobj = ba->object; 3040 if (tobj->size < OFF_TO_IDX(offset + size)) 3041 size = IDX_TO_OFF(tobj->size) - offset; 3042 break; /* XXX this break is not correct */ 3043 } 3044 if (object != tobj) { 3045 if (object) 3046 vm_object_drop(object); 3047 object = tobj; 3048 vm_object_hold(object); 3049 } 3050 } 3051 3052 if (object && (object->type == OBJT_VNODE) && 3053 (current->protection & VM_PROT_WRITE) && 3054 (object->flags & OBJ_NOMSYNC) == 0) { 3055 /* 3056 * Flush pages if writing is allowed, invalidate them 3057 * if invalidation requested. Pages undergoing I/O 3058 * will be ignored by vm_object_page_remove(). 3059 * 3060 * We cannot lock the vnode and then wait for paging 3061 * to complete without deadlocking against vm_fault. 3062 * Instead we simply call vm_object_page_remove() and 3063 * allow it to block internally on a page-by-page 3064 * basis when it encounters pages undergoing async 3065 * I/O. 3066 */ 3067 int flags; 3068 3069 /* no chain wait needed for vnode objects */ 3070 vm_object_reference_locked(object); 3071 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY); 3072 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 3073 flags |= invalidate ? OBJPC_INVAL : 0; 3074 3075 /* 3076 * When operating on a virtual page table just 3077 * flush the whole object. XXX we probably ought 3078 * to 3079 */ 3080 switch(current->maptype) { 3081 case VM_MAPTYPE_NORMAL: 3082 vm_object_page_clean(object, 3083 OFF_TO_IDX(offset), 3084 OFF_TO_IDX(offset + size + PAGE_MASK), 3085 flags); 3086 break; 3087 case VM_MAPTYPE_VPAGETABLE: 3088 vm_object_page_clean(object, 0, 0, flags); 3089 break; 3090 } 3091 vn_unlock(((struct vnode *)object->handle)); 3092 vm_object_deallocate_locked(object); 3093 } 3094 if (object && invalidate && 3095 ((object->type == OBJT_VNODE) || 3096 (object->type == OBJT_DEVICE) || 3097 (object->type == OBJT_MGTDEVICE))) { 3098 int clean_only = 3099 ((object->type == OBJT_DEVICE) || 3100 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE; 3101 /* no chain wait needed for vnode/device objects */ 3102 vm_object_reference_locked(object); 3103 switch(current->maptype) { 3104 case VM_MAPTYPE_NORMAL: 3105 vm_object_page_remove(object, 3106 OFF_TO_IDX(offset), 3107 OFF_TO_IDX(offset + size + PAGE_MASK), 3108 clean_only); 3109 break; 3110 case VM_MAPTYPE_VPAGETABLE: 3111 vm_object_page_remove(object, 0, 0, clean_only); 3112 break; 3113 } 3114 vm_object_deallocate_locked(object); 3115 } 3116 start += size; 3117 if (object) 3118 vm_object_drop(object); 3119 current = vm_map_rb_tree_RB_NEXT(current); 3120 } 3121 3122 lwkt_reltoken(&map->token); 3123 vm_map_unlock_read(map); 3124 3125 return (KERN_SUCCESS); 3126 } 3127 3128 /* 3129 * Make the region specified by this entry pageable. 3130 * 3131 * The vm_map must be exclusively locked. 3132 */ 3133 static void 3134 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3135 { 3136 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3137 entry->wired_count = 0; 3138 vm_fault_unwire(map, entry); 3139 } 3140 3141 /* 3142 * Deallocate the given entry from the target map. 3143 * 3144 * The vm_map must be exclusively locked. 
3145 */ 3146 static void 3147 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp) 3148 { 3149 vm_map_entry_unlink(map, entry); 3150 map->size -= entry->end - entry->start; 3151 vm_map_entry_dispose(map, entry, countp); 3152 } 3153 3154 /* 3155 * Deallocates the given address range from the target map. 3156 * 3157 * The vm_map must be exclusively locked. 3158 */ 3159 int 3160 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp) 3161 { 3162 vm_object_t object; 3163 vm_map_entry_t entry; 3164 vm_map_entry_t first_entry; 3165 vm_offset_t hole_start; 3166 3167 ASSERT_VM_MAP_LOCKED(map); 3168 lwkt_gettoken(&map->token); 3169 again: 3170 /* 3171 * Find the start of the region, and clip it. Set entry to point 3172 * at the first record containing the requested address or, if no 3173 * such record exists, the next record with a greater address. The 3174 * loop will run from this point until a record beyond the termination 3175 * address is encountered. 3176 * 3177 * Adjust freehint[] for either the clip case or the extension case. 3178 * 3179 * GGG see other GGG comment. 3180 */ 3181 if (vm_map_lookup_entry(map, start, &first_entry)) { 3182 entry = first_entry; 3183 vm_map_clip_start(map, entry, start, countp); 3184 hole_start = start; 3185 } else { 3186 if (first_entry) { 3187 entry = vm_map_rb_tree_RB_NEXT(first_entry); 3188 if (entry == NULL) 3189 hole_start = first_entry->start; 3190 else 3191 hole_start = first_entry->end; 3192 } else { 3193 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 3194 if (entry == NULL) 3195 hole_start = vm_map_min(map); 3196 else 3197 hole_start = vm_map_max(map); 3198 } 3199 } 3200 3201 /* 3202 * Step through all entries in this region 3203 */ 3204 while (entry && entry->start < end) { 3205 vm_map_entry_t next; 3206 vm_offset_t s, e; 3207 vm_pindex_t offidxstart, offidxend, count; 3208 3209 /* 3210 * If we hit an in-transition entry we have to sleep and 3211 * retry. It's easier (and not really slower) to just retry 3212 * since this case occurs so rarely and the hint is already 3213 * pointing at the right place. We have to reset the 3214 * start offset so as not to accidently delete an entry 3215 * another process just created in vacated space. 3216 */ 3217 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3218 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3219 start = entry->start; 3220 ++mycpu->gd_cnt.v_intrans_coll; 3221 ++mycpu->gd_cnt.v_intrans_wait; 3222 vm_map_transition_wait(map, 1); 3223 goto again; 3224 } 3225 vm_map_clip_end(map, entry, end, countp); 3226 3227 s = entry->start; 3228 e = entry->end; 3229 next = vm_map_rb_tree_RB_NEXT(entry); 3230 3231 offidxstart = OFF_TO_IDX(entry->ba.offset); 3232 count = OFF_TO_IDX(e - s); 3233 3234 switch(entry->maptype) { 3235 case VM_MAPTYPE_NORMAL: 3236 case VM_MAPTYPE_VPAGETABLE: 3237 case VM_MAPTYPE_SUBMAP: 3238 object = entry->ba.object; 3239 break; 3240 default: 3241 object = NULL; 3242 break; 3243 } 3244 3245 /* 3246 * Unwire before removing addresses from the pmap; otherwise, 3247 * unwiring will put the entries back in the pmap. 3248 * 3249 * Generally speaking, doing a bulk pmap_remove() before 3250 * removing the pages from the VM object is better at 3251 * reducing unnecessary IPIs. The pmap code is now optimized 3252 * to not blindly iterate the range when pt and pd pages 3253 * are missing. 
3254 */ 3255 if (entry->wired_count != 0) 3256 vm_map_entry_unwire(map, entry); 3257 3258 offidxend = offidxstart + count; 3259 3260 if (object == &kernel_object) { 3261 pmap_remove(map->pmap, s, e); 3262 vm_object_hold(object); 3263 vm_object_page_remove(object, offidxstart, 3264 offidxend, FALSE); 3265 vm_object_drop(object); 3266 } else if (object && object->type != OBJT_DEFAULT && 3267 object->type != OBJT_SWAP) { 3268 /* 3269 * vnode object routines cannot be chain-locked, 3270 * but since we aren't removing pages from the 3271 * object here we can use a shared hold. 3272 */ 3273 vm_object_hold_shared(object); 3274 pmap_remove(map->pmap, s, e); 3275 vm_object_drop(object); 3276 } else if (object) { 3277 vm_object_hold(object); 3278 pmap_remove(map->pmap, s, e); 3279 3280 if (object != NULL && 3281 object->ref_count != 1 && 3282 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == 3283 OBJ_ONEMAPPING && 3284 (object->type == OBJT_DEFAULT || 3285 object->type == OBJT_SWAP)) { 3286 /* 3287 * When ONEMAPPING is set we can destroy the 3288 * pages underlying the entry's range. 3289 */ 3290 vm_object_page_remove(object, offidxstart, 3291 offidxend, FALSE); 3292 if (object->type == OBJT_SWAP) { 3293 swap_pager_freespace(object, 3294 offidxstart, 3295 count); 3296 } 3297 if (offidxend >= object->size && 3298 offidxstart < object->size) { 3299 object->size = offidxstart; 3300 } 3301 } 3302 vm_object_drop(object); 3303 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) { 3304 pmap_remove(map->pmap, s, e); 3305 } 3306 3307 /* 3308 * Delete the entry (which may delete the object) only after 3309 * removing all pmap entries pointing to its pages. 3310 * (Otherwise, its page frames may be reallocated, and any 3311 * modify bits will be set in the wrong object!) 3312 */ 3313 vm_map_entry_delete(map, entry, countp); 3314 entry = next; 3315 } 3316 3317 /* 3318 * We either reached the end and use vm_map_max as the end 3319 * address, or we didn't and we use the next entry as the 3320 * end address. 3321 */ 3322 if (entry == NULL) { 3323 vm_map_freehint_hole(map, hole_start, 3324 vm_map_max(map) - hole_start); 3325 } else { 3326 vm_map_freehint_hole(map, hole_start, 3327 entry->start - hole_start); 3328 } 3329 3330 lwkt_reltoken(&map->token); 3331 3332 return (KERN_SUCCESS); 3333 } 3334 3335 /* 3336 * Remove the given address range from the target map. 3337 * This is the exported form of vm_map_delete. 3338 * 3339 * No requirements. 3340 */ 3341 int 3342 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3343 { 3344 int result; 3345 int count; 3346 3347 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3348 vm_map_lock(map); 3349 VM_MAP_RANGE_CHECK(map, start, end); 3350 result = vm_map_delete(map, start, end, &count); 3351 vm_map_unlock(map); 3352 vm_map_entry_release(count); 3353 3354 return (result); 3355 } 3356 3357 /* 3358 * Assert that the target map allows the specified privilege on the 3359 * entire address region given. The entire region must be allocated. 3360 * 3361 * The caller must specify whether the vm_map is already locked or not. 
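 *
 * Illustrative sketch (an assumption, not part of the original source):
 * a caller that does not already hold the map lock might verify read
 * access on a user range with
 *
 *	if (vm_map_check_protection(&p->p_vmspace->vm_map,
 *				    trunc_page(addr),
 *				    round_page(addr + len),
 *				    VM_PROT_READ, FALSE) == FALSE) {
 *		return (EFAULT);
 *	}
 *
 * Passing TRUE for have_lock skips the internal read lock/unlock.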
3362 */ 3363 boolean_t 3364 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3365 vm_prot_t protection, boolean_t have_lock) 3366 { 3367 vm_map_entry_t entry; 3368 vm_map_entry_t tmp_entry; 3369 boolean_t result; 3370 3371 if (have_lock == FALSE) 3372 vm_map_lock_read(map); 3373 3374 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 3375 if (have_lock == FALSE) 3376 vm_map_unlock_read(map); 3377 return (FALSE); 3378 } 3379 entry = tmp_entry; 3380 3381 result = TRUE; 3382 while (start < end) { 3383 if (entry == NULL) { 3384 result = FALSE; 3385 break; 3386 } 3387 3388 /* 3389 * No holes allowed! 3390 */ 3391 3392 if (start < entry->start) { 3393 result = FALSE; 3394 break; 3395 } 3396 /* 3397 * Check protection associated with entry. 3398 */ 3399 3400 if ((entry->protection & protection) != protection) { 3401 result = FALSE; 3402 break; 3403 } 3404 /* go to next entry */ 3405 start = entry->end; 3406 entry = vm_map_rb_tree_RB_NEXT(entry); 3407 } 3408 if (have_lock == FALSE) 3409 vm_map_unlock_read(map); 3410 return (result); 3411 } 3412 3413 /* 3414 * Handles the dirty work of making src_entry and dst_entry copy-on-write 3415 * after src_entry has been cloned to dst_entry. 3416 * 3417 * The vm_maps must be exclusively locked. 3418 * The vm_map's token must be held. 3419 * 3420 * Because the maps are locked no faults can be in progress during the 3421 * operation. 3422 */ 3423 static void 3424 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map, 3425 vm_map_entry_t src_entry, vm_map_entry_t dst_entry) 3426 { 3427 vm_object_t src_object; 3428 3429 /* 3430 * Nothing to do for special map types 3431 */ 3432 if (dst_entry->maptype == VM_MAPTYPE_SUBMAP || 3433 dst_entry->maptype == VM_MAPTYPE_UKSMAP) { 3434 return; 3435 } 3436 if (src_entry->maptype == VM_MAPTYPE_SUBMAP || 3437 src_entry->maptype == VM_MAPTYPE_UKSMAP) { 3438 return; 3439 } 3440 3441 if (src_entry->wired_count) { 3442 /* 3443 * Of course, wired down pages can't be set copy-on-write. 3444 * Cause wired pages to be copied into the new map by 3445 * simulating faults (the new pages are pageable) 3446 * 3447 * Scrap ba.object (its ref-count has not yet been adjusted 3448 * so we can just NULL out the field). Remove the backing 3449 * store. 3450 * 3451 * Then call vm_fault_copy_entry() to create a new object 3452 * in dst_entry and copy the wired pages from src to dst. 3453 */ 3454 dst_entry->ba.object = NULL; 3455 vm_map_entry_dispose_ba(dst_entry->ba.backing_ba); 3456 dst_entry->ba.backing_ba = NULL; 3457 dst_entry->ba.backing_count = 0; 3458 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 3459 } else { 3460 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 3461 /* 3462 * If the source entry is not already marked NEEDS_COPY 3463 * we need to write-protect the PTEs. 3464 */ 3465 pmap_protect(src_map->pmap, 3466 src_entry->start, 3467 src_entry->end, 3468 src_entry->protection & ~VM_PROT_WRITE); 3469 } 3470 3471 /* 3472 * dst_entry.ba_object might be stale. Update it (its 3473 * ref-count has not yet been updated so just overwrite 3474 * the field). 3475 * 3476 * If there is no object then we are golden. Also, in 3477 * this situation if there are no backing_ba linkages then 3478 * we can set ba.offset to 0 for debugging convenience. 3479 * 3480 * ba.offset cannot otherwise be modified because it effects 3481 * the offsets for the entire backing_ba chain. 
3482 */ 3483 src_object = src_entry->ba.object; 3484 3485 if (src_object) { 3486 vm_object_hold(src_object); /* for ref & flag clr */ 3487 vm_object_reference_locked(src_object); 3488 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3489 3490 src_entry->eflags |= (MAP_ENTRY_COW | 3491 MAP_ENTRY_NEEDS_COPY); 3492 dst_entry->eflags |= (MAP_ENTRY_COW | 3493 MAP_ENTRY_NEEDS_COPY); 3494 KKASSERT(dst_entry->ba.offset == src_entry->ba.offset); 3495 vm_object_drop(src_object); 3496 } else { 3497 if (dst_entry->ba.backing_ba == NULL) 3498 dst_entry->ba.offset = 0; 3499 } 3500 3501 /* 3502 * Normal, allow the backing_ba link depth to 3503 * increase. 3504 */ 3505 pmap_copy(dst_map->pmap, src_map->pmap, 3506 dst_entry->start, 3507 dst_entry->end - dst_entry->start, 3508 src_entry->start); 3509 } 3510 } 3511 3512 /* 3513 * vmspace_fork: 3514 * Create a new process vmspace structure and vm_map 3515 * based on those of an existing process. The new map 3516 * is based on the old map, according to the inheritance 3517 * values on the regions in that map. 3518 * 3519 * The source map must not be locked. 3520 * No requirements. 3521 */ 3522 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3523 vm_map_entry_t old_entry, int *countp); 3524 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3525 vm_map_entry_t old_entry, int *countp); 3526 3527 struct vmspace * 3528 vmspace_fork(struct vmspace *vm1) 3529 { 3530 struct vmspace *vm2; 3531 vm_map_t old_map = &vm1->vm_map; 3532 vm_map_t new_map; 3533 vm_map_entry_t old_entry; 3534 int count; 3535 3536 lwkt_gettoken(&vm1->vm_map.token); 3537 vm_map_lock(old_map); 3538 3539 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map)); 3540 lwkt_gettoken(&vm2->vm_map.token); 3541 3542 /* 3543 * We must bump the timestamp to force any concurrent fault 3544 * to retry. 3545 */ 3546 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 3547 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy); 3548 new_map = &vm2->vm_map; /* XXX */ 3549 new_map->timestamp = 1; 3550 3551 vm_map_lock(new_map); 3552 3553 count = old_map->nentries; 3554 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT); 3555 3556 RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) { 3557 switch(old_entry->maptype) { 3558 case VM_MAPTYPE_SUBMAP: 3559 panic("vm_map_fork: encountered a submap"); 3560 break; 3561 case VM_MAPTYPE_UKSMAP: 3562 vmspace_fork_uksmap_entry(old_map, new_map, 3563 old_entry, &count); 3564 break; 3565 case VM_MAPTYPE_NORMAL: 3566 case VM_MAPTYPE_VPAGETABLE: 3567 vmspace_fork_normal_entry(old_map, new_map, 3568 old_entry, &count); 3569 break; 3570 } 3571 } 3572 3573 new_map->size = old_map->size; 3574 vm_map_unlock(new_map); 3575 vm_map_unlock(old_map); 3576 vm_map_entry_release(count); 3577 3578 lwkt_reltoken(&vm2->vm_map.token); 3579 lwkt_reltoken(&vm1->vm_map.token); 3580 3581 return (vm2); 3582 } 3583 3584 static 3585 void 3586 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3587 vm_map_entry_t old_entry, int *countp) 3588 { 3589 vm_map_entry_t new_entry; 3590 vm_map_backing_t ba; 3591 vm_object_t object; 3592 3593 #if 0 3594 /* 3595 * Any uninterrupted sequence of ba->refs == 1 in the backing_ba 3596 * list can be collapsed. It's a good time to do this check with 3597 * regards to prior forked children likely having exited or execd. 3598 * 3599 * Only the specific page ranges within the object(s) specified by 3600 * the entry can be collapsed. 
3601 * 3602 * Once we hit ba->refs > 1, or a non-anonymous-memory object, 3603 * we're done. Even if later ba's beyond this parent ba have 3604 * a ref count of 1 the whole sub-list could be shared at the this 3605 * parent ba and so we have to stop. 3606 * 3607 * We do not have to test OBJ_ONEMAPPING here (it probably won't be 3608 * set anyway due to previous sharing of the object). Also the objects 3609 * themselves might have a ref_count > 1 due to clips and forks 3610 * related to OTHER page ranges. That is, the vm_object itself might 3611 * still be associated with multiple pmaps... just not this particular 3612 * page range within the object. 3613 */ 3614 while ((ba = old_entry->ba.backing_ba) && ba->refs == 1) { 3615 if (ba.object->type != OBJT_DEFAULT && 3616 ba.object->type != OBJT_SWAP) { 3617 break; 3618 } 3619 object = vm_object_collapse(old_entry->ba.object, ba->object); 3620 if (object == old_entry->ba.object) { 3621 /* 3622 * Merged into base, remove intermediate ba. 3623 */ 3624 kprintf("A"); 3625 --old_entry->ba.backing_count; 3626 old_entry->ba.backing_ba = ba->backing_ba; 3627 if (ba->backing_ba) 3628 ba->backing_ba->offset += ba->offset; 3629 ba->backing_ba = NULL; 3630 vm_map_entry_dispose_ba(ba); 3631 } else if (object == ba->object) { 3632 /* 3633 * Merged into intermediate ba, shift it into 3634 * the base. 3635 */ 3636 kprintf("B"); 3637 vm_object_deallocate(old_entry->ba.object); 3638 --old_entry->ba.backing_count; 3639 old_entry->ba.backing_ba = ba->backing_ba; 3640 old_entry->ba.object = ba->object; 3641 old_entry->ba.offset += ba->offset; 3642 ba->object = NULL; 3643 ba->backing_ba = NULL; 3644 vm_map_entry_dispose_ba(ba); 3645 } else { 3646 break; 3647 } 3648 } 3649 #endif 3650 3651 /* 3652 * If the backing_ba link list gets too long then fault it 3653 * all into the head object and dispose of the list. We do 3654 * this in old_entry prior to cloning in order to benefit both 3655 * parent and child. 3656 * 3657 * We can test our fronting object's size against its 3658 * resident_page_count for a really cheap (but probably not perfect) 3659 * all-shadowed test, allowing us to disconnect the backing_ba 3660 * link list early. 3661 */ 3662 object = old_entry->ba.object; 3663 if (old_entry->ba.backing_ba && 3664 (old_entry->ba.backing_count >= vm_map_backing_limit || 3665 (vm_map_backing_shadow_test && object && 3666 object->size == object->resident_page_count))) { 3667 /* 3668 * If there are too many backing_ba linkages we 3669 * collapse everything into the head 3670 * 3671 * This will also remove all the pte's. 3672 */ 3673 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) 3674 vm_map_entry_shadow(old_entry, 0); 3675 if (object == NULL) 3676 vm_map_entry_allocate_object(old_entry); 3677 if (vm_fault_collapse(old_map, old_entry) == KERN_SUCCESS) { 3678 ba = old_entry->ba.backing_ba; 3679 old_entry->ba.backing_ba = NULL; 3680 old_entry->ba.backing_count = 0; 3681 vm_map_entry_dispose_ba(ba); 3682 } 3683 } 3684 object = NULL; /* object variable is now invalid */ 3685 3686 /* 3687 * Fork the entry 3688 */ 3689 switch (old_entry->inheritance) { 3690 case VM_INHERIT_NONE: 3691 break; 3692 case VM_INHERIT_SHARE: 3693 /* 3694 * Clone the entry as a shared entry. This will look like 3695 * shared memory across the old and the new process. We must 3696 * ensure that the object is allocated. 
3697 */ 3698 if (old_entry->ba.object == NULL) 3699 vm_map_entry_allocate_object(old_entry); 3700 3701 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3702 /* 3703 * Create the fronting vm_map_backing for 3704 * an entry which needs a copy, plus an extra 3705 * ref because we are going to duplicate it 3706 * in the fork. 3707 * 3708 * The call to vm_map_entry_shadow() will also clear 3709 * OBJ_ONEMAPPING. 3710 * 3711 * XXX no more collapse. Still need extra ref 3712 * for the fork. 3713 */ 3714 vm_map_entry_shadow(old_entry, 1); 3715 } else if (old_entry->ba.object) { 3716 /* 3717 * We will make a shared copy of the object, 3718 * and must clear OBJ_ONEMAPPING. 3719 * 3720 * Optimize vnode objects. OBJ_ONEMAPPING 3721 * is non-applicable but clear it anyway, 3722 * and its terminal so we don't have to deal 3723 * with chains. Reduces SMP conflicts. 3724 * 3725 * XXX assert that object.vm_object != NULL 3726 * since we allocate it above. 3727 */ 3728 object = old_entry->ba.object; 3729 if (object->type == OBJT_VNODE) { 3730 vm_object_reference_quick(object); 3731 vm_object_clear_flag(object, 3732 OBJ_ONEMAPPING); 3733 } else { 3734 vm_object_hold(object); 3735 vm_object_reference_locked(object); 3736 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3737 vm_object_drop(object); 3738 } 3739 } 3740 3741 /* 3742 * Clone the entry. We've already bumped the ref on 3743 * the vm_object for our new entry. 3744 */ 3745 new_entry = vm_map_entry_create(new_map, countp); 3746 *new_entry = *old_entry; 3747 3748 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3749 new_entry->wired_count = 0; 3750 if (new_entry->ba.backing_ba) 3751 atomic_add_long(&new_entry->ba.backing_ba->refs, 1); 3752 3753 /* 3754 * Insert the entry into the new map -- we know we're 3755 * inserting at the end of the new map. 3756 */ 3757 vm_map_entry_link(new_map, new_entry); 3758 3759 /* 3760 * Update the physical map 3761 */ 3762 pmap_copy(new_map->pmap, old_map->pmap, 3763 new_entry->start, 3764 (old_entry->end - old_entry->start), 3765 old_entry->start); 3766 break; 3767 case VM_INHERIT_COPY: 3768 /* 3769 * Clone the entry and link the copy into the new map. 3770 * 3771 * Note that ref-counting adjustment for old_entry->ba.object 3772 * (if it isn't a special map that is) is handled by 3773 * vm_map_copy_entry(). 3774 */ 3775 new_entry = vm_map_entry_create(new_map, countp); 3776 *new_entry = *old_entry; 3777 3778 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3779 new_entry->wired_count = 0; 3780 if (new_entry->ba.backing_ba) 3781 atomic_add_long(&new_entry->ba.backing_ba->refs, 1); 3782 3783 vm_map_entry_link(new_map, new_entry); 3784 3785 /* 3786 * This does the actual dirty work of making both entries 3787 * copy-on-write, and will also handle the fronting object. 3788 */ 3789 vm_map_copy_entry(old_map, new_map, old_entry, new_entry); 3790 break; 3791 } 3792 } 3793 3794 /* 3795 * When forking user-kernel shared maps, the map might change in the 3796 * child so do not try to copy the underlying pmap entries. 
3797 */ 3798 static 3799 void 3800 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3801 vm_map_entry_t old_entry, int *countp) 3802 { 3803 vm_map_entry_t new_entry; 3804 3805 new_entry = vm_map_entry_create(new_map, countp); 3806 *new_entry = *old_entry; 3807 3808 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3809 new_entry->wired_count = 0; 3810 if (new_entry->ba.backing_ba) 3811 atomic_add_long(&new_entry->ba.backing_ba->refs, 1); 3812 3813 vm_map_entry_link(new_map, new_entry); 3814 } 3815 3816 /* 3817 * Create an auto-grow stack entry 3818 * 3819 * No requirements. 3820 */ 3821 int 3822 vm_map_stack (vm_map_t map, vm_offset_t *addrbos, vm_size_t max_ssize, 3823 int flags, vm_prot_t prot, vm_prot_t max, int cow) 3824 { 3825 vm_map_entry_t prev_entry; 3826 vm_map_entry_t next; 3827 vm_size_t init_ssize; 3828 int rv; 3829 int count; 3830 vm_offset_t tmpaddr; 3831 3832 cow |= MAP_IS_STACK; 3833 3834 if (max_ssize < sgrowsiz) 3835 init_ssize = max_ssize; 3836 else 3837 init_ssize = sgrowsiz; 3838 3839 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3840 vm_map_lock(map); 3841 3842 /* 3843 * Find space for the mapping 3844 */ 3845 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 3846 if (vm_map_findspace(map, *addrbos, max_ssize, 1, 3847 flags, &tmpaddr)) { 3848 vm_map_unlock(map); 3849 vm_map_entry_release(count); 3850 return (KERN_NO_SPACE); 3851 } 3852 *addrbos = tmpaddr; 3853 } 3854 3855 /* If addr is already mapped, no go */ 3856 if (vm_map_lookup_entry(map, *addrbos, &prev_entry)) { 3857 vm_map_unlock(map); 3858 vm_map_entry_release(count); 3859 return (KERN_NO_SPACE); 3860 } 3861 3862 #if 0 3863 /* XXX already handled by kern_mmap() */ 3864 /* If we would blow our VMEM resource limit, no go */ 3865 if (map->size + init_ssize > 3866 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3867 vm_map_unlock(map); 3868 vm_map_entry_release(count); 3869 return (KERN_NO_SPACE); 3870 } 3871 #endif 3872 3873 /* 3874 * If we can't accomodate max_ssize in the current mapping, 3875 * no go. However, we need to be aware that subsequent user 3876 * mappings might map into the space we have reserved for 3877 * stack, and currently this space is not protected. 3878 * 3879 * Hopefully we will at least detect this condition 3880 * when we try to grow the stack. 3881 */ 3882 if (prev_entry) 3883 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3884 else 3885 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3886 3887 if (next && next->start < *addrbos + max_ssize) { 3888 vm_map_unlock(map); 3889 vm_map_entry_release(count); 3890 return (KERN_NO_SPACE); 3891 } 3892 3893 /* 3894 * We initially map a stack of only init_ssize. We will 3895 * grow as needed later. Since this is to be a grow 3896 * down stack, we map at the top of the range. 3897 * 3898 * Note: we would normally expect prot and max to be 3899 * VM_PROT_ALL, and cow to be 0. Possibly we should 3900 * eliminate these as input parameters, and just 3901 * pass these values here in the insert call. 
3902 */ 3903 rv = vm_map_insert(map, &count, NULL, NULL, 3904 0, *addrbos + max_ssize - init_ssize, 3905 *addrbos + max_ssize, 3906 VM_MAPTYPE_NORMAL, 3907 VM_SUBSYS_STACK, prot, max, cow); 3908 3909 /* Now set the avail_ssize amount */ 3910 if (rv == KERN_SUCCESS) { 3911 if (prev_entry) 3912 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3913 else 3914 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3915 if (prev_entry != NULL) { 3916 vm_map_clip_end(map, 3917 prev_entry, 3918 *addrbos + max_ssize - init_ssize, 3919 &count); 3920 } 3921 if (next->end != *addrbos + max_ssize || 3922 next->start != *addrbos + max_ssize - init_ssize){ 3923 panic ("Bad entry start/end for new stack entry"); 3924 } else { 3925 next->aux.avail_ssize = max_ssize - init_ssize; 3926 } 3927 } 3928 3929 vm_map_unlock(map); 3930 vm_map_entry_release(count); 3931 return (rv); 3932 } 3933 3934 /* 3935 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3936 * desired address is already mapped, or if we successfully grow 3937 * the stack. Also returns KERN_SUCCESS if addr is outside the 3938 * stack range (this is strange, but preserves compatibility with 3939 * the grow function in vm_machdep.c). 3940 * 3941 * No requirements. 3942 */ 3943 int 3944 vm_map_growstack (vm_map_t map, vm_offset_t addr) 3945 { 3946 vm_map_entry_t prev_entry; 3947 vm_map_entry_t stack_entry; 3948 vm_map_entry_t next; 3949 struct vmspace *vm; 3950 struct lwp *lp; 3951 struct proc *p; 3952 vm_offset_t end; 3953 int grow_amount; 3954 int rv = KERN_SUCCESS; 3955 int is_procstack; 3956 int use_read_lock = 1; 3957 int count; 3958 3959 /* 3960 * Find the vm 3961 */ 3962 lp = curthread->td_lwp; 3963 p = curthread->td_proc; 3964 KKASSERT(lp != NULL); 3965 vm = lp->lwp_vmspace; 3966 3967 /* 3968 * Growstack is only allowed on the current process. We disallow 3969 * other use cases, e.g. trying to access memory via procfs that 3970 * the stack hasn't grown into. 3971 */ 3972 if (map != &vm->vm_map) { 3973 return KERN_FAILURE; 3974 } 3975 3976 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3977 Retry: 3978 if (use_read_lock) 3979 vm_map_lock_read(map); 3980 else 3981 vm_map_lock(map); 3982 3983 /* 3984 * If addr is already in the entry range, no need to grow. 3985 * prev_entry returns NULL if addr is at the head. 3986 */ 3987 if (vm_map_lookup_entry(map, addr, &prev_entry)) 3988 goto done; 3989 if (prev_entry) 3990 stack_entry = vm_map_rb_tree_RB_NEXT(prev_entry); 3991 else 3992 stack_entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 3993 3994 if (stack_entry == NULL) 3995 goto done; 3996 if (prev_entry == NULL) 3997 end = stack_entry->start - stack_entry->aux.avail_ssize; 3998 else 3999 end = prev_entry->end; 4000 4001 /* 4002 * This next test mimics the old grow function in vm_machdep.c. 4003 * It really doesn't quite make sense, but we do it anyway 4004 * for compatibility. 4005 * 4006 * If not growable stack, return success. This signals the 4007 * caller to proceed as he would normally with normal vm. 4008 */ 4009 if (stack_entry->aux.avail_ssize < 1 || 4010 addr >= stack_entry->start || 4011 addr < stack_entry->start - stack_entry->aux.avail_ssize) { 4012 goto done; 4013 } 4014 4015 /* Find the minimum grow amount */ 4016 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 4017 if (grow_amount > stack_entry->aux.avail_ssize) { 4018 rv = KERN_NO_SPACE; 4019 goto done; 4020 } 4021 4022 /* 4023 * If there is no longer enough space between the entries 4024 * nogo, and adjust the available space. 
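	 * (That is, the gap between the `end' computed above and
	 * stack_entry->start is smaller than grow_amount.)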
Note: this 4025 * should only happen if the user has mapped into the 4026 * stack area after the stack was created, and is 4027 * probably an error. 4028 * 4029 * This also effectively destroys any guard page the user 4030 * might have intended by limiting the stack size. 4031 */ 4032 if (grow_amount > stack_entry->start - end) { 4033 if (use_read_lock && vm_map_lock_upgrade(map)) { 4034 /* lost lock */ 4035 use_read_lock = 0; 4036 goto Retry; 4037 } 4038 use_read_lock = 0; 4039 stack_entry->aux.avail_ssize = stack_entry->start - end; 4040 rv = KERN_NO_SPACE; 4041 goto done; 4042 } 4043 4044 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 4045 4046 /* If this is the main process stack, see if we're over the 4047 * stack limit. 4048 */ 4049 if (is_procstack && (vm->vm_ssize + grow_amount > 4050 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 4051 rv = KERN_NO_SPACE; 4052 goto done; 4053 } 4054 4055 /* Round up the grow amount modulo SGROWSIZ */ 4056 grow_amount = roundup (grow_amount, sgrowsiz); 4057 if (grow_amount > stack_entry->aux.avail_ssize) { 4058 grow_amount = stack_entry->aux.avail_ssize; 4059 } 4060 if (is_procstack && (vm->vm_ssize + grow_amount > 4061 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 4062 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - vm->vm_ssize; 4063 } 4064 4065 /* If we would blow our VMEM resource limit, no go */ 4066 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 4067 rv = KERN_NO_SPACE; 4068 goto done; 4069 } 4070 4071 if (use_read_lock && vm_map_lock_upgrade(map)) { 4072 /* lost lock */ 4073 use_read_lock = 0; 4074 goto Retry; 4075 } 4076 use_read_lock = 0; 4077 4078 /* Get the preliminary new entry start value */ 4079 addr = stack_entry->start - grow_amount; 4080 4081 /* If this puts us into the previous entry, cut back our growth 4082 * to the available space. Also, see the note above. 4083 */ 4084 if (addr < end) { 4085 stack_entry->aux.avail_ssize = stack_entry->start - end; 4086 addr = end; 4087 } 4088 4089 rv = vm_map_insert(map, &count, NULL, NULL, 4090 0, addr, stack_entry->start, 4091 VM_MAPTYPE_NORMAL, 4092 VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0); 4093 4094 /* Adjust the available stack space by the amount we grew. */ 4095 if (rv == KERN_SUCCESS) { 4096 if (prev_entry) { 4097 vm_map_clip_end(map, prev_entry, addr, &count); 4098 next = vm_map_rb_tree_RB_NEXT(prev_entry); 4099 } else { 4100 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 4101 } 4102 if (next->end != stack_entry->start || 4103 next->start != addr) { 4104 panic ("Bad stack grow start/end in new stack entry"); 4105 } else { 4106 next->aux.avail_ssize = 4107 stack_entry->aux.avail_ssize - 4108 (next->end - next->start); 4109 if (is_procstack) { 4110 vm->vm_ssize += next->end - 4111 next->start; 4112 } 4113 } 4114 4115 if (map->flags & MAP_WIREFUTURE) 4116 vm_map_unwire(map, next->start, next->end, FALSE); 4117 } 4118 4119 done: 4120 if (use_read_lock) 4121 vm_map_unlock_read(map); 4122 else 4123 vm_map_unlock(map); 4124 vm_map_entry_release(count); 4125 return (rv); 4126 } 4127 4128 /* 4129 * Unshare the specified VM space for exec. If other processes are 4130 * mapped to it, then create a new one. The new vmspace is null. 4131 * 4132 * No requirements. 4133 */ 4134 void 4135 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 4136 { 4137 struct vmspace *oldvmspace = p->p_vmspace; 4138 struct vmspace *newvmspace; 4139 vm_map_t map = &p->p_vmspace->vm_map; 4140 4141 /* 4142 * If we are execing a resident vmspace we fork it, otherwise 4143 * we create a new vmspace. 
Note that exitingcnt is not 4144 * copied to the new vmspace. 4145 */ 4146 lwkt_gettoken(&oldvmspace->vm_map.token); 4147 if (vmcopy) { 4148 newvmspace = vmspace_fork(vmcopy); 4149 lwkt_gettoken(&newvmspace->vm_map.token); 4150 } else { 4151 newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map)); 4152 lwkt_gettoken(&newvmspace->vm_map.token); 4153 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 4154 (caddr_t)&oldvmspace->vm_endcopy - 4155 (caddr_t)&oldvmspace->vm_startcopy); 4156 } 4157 4158 /* 4159 * Finish initializing the vmspace before assigning it 4160 * to the process. The vmspace will become the current vmspace 4161 * if p == curproc. 4162 */ 4163 pmap_pinit2(vmspace_pmap(newvmspace)); 4164 pmap_replacevm(p, newvmspace, 0); 4165 lwkt_reltoken(&newvmspace->vm_map.token); 4166 lwkt_reltoken(&oldvmspace->vm_map.token); 4167 vmspace_rel(oldvmspace); 4168 } 4169 4170 /* 4171 * Unshare the specified VM space for forcing COW. This 4172 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4173 */ 4174 void 4175 vmspace_unshare(struct proc *p) 4176 { 4177 struct vmspace *oldvmspace = p->p_vmspace; 4178 struct vmspace *newvmspace; 4179 4180 lwkt_gettoken(&oldvmspace->vm_map.token); 4181 if (vmspace_getrefs(oldvmspace) == 1) { 4182 lwkt_reltoken(&oldvmspace->vm_map.token); 4183 return; 4184 } 4185 newvmspace = vmspace_fork(oldvmspace); 4186 lwkt_gettoken(&newvmspace->vm_map.token); 4187 pmap_pinit2(vmspace_pmap(newvmspace)); 4188 pmap_replacevm(p, newvmspace, 0); 4189 lwkt_reltoken(&newvmspace->vm_map.token); 4190 lwkt_reltoken(&oldvmspace->vm_map.token); 4191 vmspace_rel(oldvmspace); 4192 } 4193 4194 /* 4195 * vm_map_hint: return the beginning of the best area suitable for 4196 * creating a new mapping with "prot" protection. 4197 * 4198 * No requirements. 4199 */ 4200 vm_offset_t 4201 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot) 4202 { 4203 struct vmspace *vms = p->p_vmspace; 4204 struct rlimit limit; 4205 rlim_t dsiz; 4206 4207 /* 4208 * Acquire datasize limit for mmap() operation, 4209 * calculate nearest power of 2. 4210 */ 4211 if (kern_getrlimit(RLIMIT_DATA, &limit)) 4212 limit.rlim_cur = maxdsiz; 4213 dsiz = limit.rlim_cur; 4214 4215 if (!randomize_mmap || addr != 0) { 4216 /* 4217 * Set a reasonable start point for the hint if it was 4218 * not specified or if it falls within the heap space. 4219 * Hinted mmap()s do not allocate out of the heap space. 4220 */ 4221 if (addr == 0 || 4222 (addr >= round_page((vm_offset_t)vms->vm_taddr) && 4223 addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) { 4224 addr = round_page((vm_offset_t)vms->vm_daddr + dsiz); 4225 } 4226 4227 return addr; 4228 } 4229 4230 /* 4231 * randomize_mmap && addr == 0. For now randomize the 4232 * address within a dsiz range beyond the data limit. 4233 */ 4234 addr = (vm_offset_t)vms->vm_daddr + dsiz; 4235 if (dsiz) 4236 addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz; 4237 return (round_page(addr)); 4238 } 4239 4240 /* 4241 * Finds the VM object, offset, and protection for a given virtual address 4242 * in the specified map, assuming a page fault of the type specified. 4243 * 4244 * Leaves the map in question locked for read; return values are guaranteed 4245 * until a vm_map_lookup_done call is performed. Note that the map argument 4246 * is in/out; the returned map must be used in the call to vm_map_lookup_done. 4247 * 4248 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make 4249 * that fast. 
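 *
 * Illustrative caller sketch (hypothetical; va is the address being
 * resolved, error paths are omitted, and a real caller such as the page
 * fault code also honors the returned wflags):
 *
 *	vm_map_t map = &curproc->p_vmspace->vm_map;
 *	vm_map_entry_t entry;
 *	struct vm_map_backing *ba;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	int wflags;
 *
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &ba,
 *			  &pindex, &prot, &wflags) == KERN_SUCCESS) {
 *		(use ba->object and pindex while the read lock is held)
 *		vm_map_lookup_done(map, entry, 0);
 *	}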
4250 * 4251 * If a lookup is requested with "write protection" specified, the map may 4252 * be changed to perform virtual copying operations, although the data 4253 * referenced will remain the same. 4254 * 4255 * No requirements. 4256 */ 4257 int 4258 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4259 vm_offset_t vaddr, 4260 vm_prot_t fault_typea, 4261 vm_map_entry_t *out_entry, /* OUT */ 4262 struct vm_map_backing **bap, /* OUT */ 4263 vm_pindex_t *pindex, /* OUT */ 4264 vm_prot_t *out_prot, /* OUT */ 4265 int *wflags) /* OUT */ 4266 { 4267 vm_map_entry_t entry; 4268 vm_map_t map = *var_map; 4269 vm_prot_t prot; 4270 vm_prot_t fault_type = fault_typea; 4271 int use_read_lock = 1; 4272 int rv = KERN_SUCCESS; 4273 int count; 4274 thread_t td = curthread; 4275 4276 /* 4277 * vm_map_entry_reserve() implements an important mitigation 4278 * against mmap() span running the kernel out of vm_map_entry 4279 * structures, but it can also cause an infinite call recursion. 4280 * Use td_nest_count to prevent an infinite recursion (allows 4281 * the vm_map code to dig into the pcpu vm_map_entry reserve). 4282 */ 4283 count = 0; 4284 if (td->td_nest_count == 0) { 4285 ++td->td_nest_count; 4286 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 4287 --td->td_nest_count; 4288 } 4289 RetryLookup: 4290 if (use_read_lock) 4291 vm_map_lock_read(map); 4292 else 4293 vm_map_lock(map); 4294 4295 /* 4296 * Always do a full lookup. The hint doesn't get us much anymore 4297 * now that the map is RB'd. 4298 */ 4299 cpu_ccfence(); 4300 *out_entry = NULL; 4301 *bap = NULL; 4302 4303 { 4304 vm_map_entry_t tmp_entry; 4305 4306 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { 4307 rv = KERN_INVALID_ADDRESS; 4308 goto done; 4309 } 4310 entry = tmp_entry; 4311 *out_entry = entry; 4312 } 4313 4314 /* 4315 * Handle submaps. 4316 */ 4317 if (entry->maptype == VM_MAPTYPE_SUBMAP) { 4318 vm_map_t old_map = map; 4319 4320 *var_map = map = entry->ba.sub_map; 4321 if (use_read_lock) 4322 vm_map_unlock_read(old_map); 4323 else 4324 vm_map_unlock(old_map); 4325 use_read_lock = 1; 4326 goto RetryLookup; 4327 } 4328 4329 /* 4330 * Check whether this task is allowed to have this page. 4331 * Note the special case for MAP_ENTRY_COW pages with an override. 4332 * This is to implement a forced COW for debuggers. 4333 */ 4334 if (fault_type & VM_PROT_OVERRIDE_WRITE) 4335 prot = entry->max_protection; 4336 else 4337 prot = entry->protection; 4338 4339 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 4340 if ((fault_type & prot) != fault_type) { 4341 rv = KERN_PROTECTION_FAILURE; 4342 goto done; 4343 } 4344 4345 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 4346 (entry->eflags & MAP_ENTRY_COW) && 4347 (fault_type & VM_PROT_WRITE) && 4348 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 4349 rv = KERN_PROTECTION_FAILURE; 4350 goto done; 4351 } 4352 4353 /* 4354 * If this page is not pageable, we have to get it for all possible 4355 * accesses. 4356 */ 4357 *wflags = 0; 4358 if (entry->wired_count) { 4359 *wflags |= FW_WIRED; 4360 prot = fault_type = entry->protection; 4361 } 4362 4363 /* 4364 * Virtual page tables may need to update the accessed (A) bit 4365 * in a page table entry. Upgrade the fault to a write fault for 4366 * that case if the map will support it. If the map does not support 4367 * it the page table entry simply will not be updated. 
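	 *
	 * (In other words, a read fault on a VPAGETABLE entry whose
	 * protection includes VM_PROT_WRITE is promoted to a write fault
	 * here; a similar promotion is applied just below when the pmap
	 * emulates the A/M bits in software.)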
4368 */ 4369 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 4370 if (prot & VM_PROT_WRITE) 4371 fault_type |= VM_PROT_WRITE; 4372 } 4373 4374 if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace && 4375 pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) { 4376 if ((prot & VM_PROT_WRITE) == 0) 4377 fault_type |= VM_PROT_WRITE; 4378 } 4379 4380 /* 4381 * Only NORMAL and VPAGETABLE maps are object-based. UKSMAPs are not. 4382 */ 4383 if (entry->maptype != VM_MAPTYPE_NORMAL && 4384 entry->maptype != VM_MAPTYPE_VPAGETABLE) { 4385 *bap = NULL; 4386 goto skip; 4387 } 4388 4389 /* 4390 * If the entry was copy-on-write, we either ... 4391 */ 4392 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4393 /* 4394 * If we want to write the page, we may as well handle that 4395 * now since we've got the map locked. 4396 * 4397 * If we don't need to write the page, we just demote the 4398 * permissions allowed. 4399 */ 4400 if (fault_type & VM_PROT_WRITE) { 4401 /* 4402 * Not allowed if TDF_NOFAULT is set as the shadowing 4403 * operation can deadlock against the faulting 4404 * function due to the copy-on-write. 4405 */ 4406 if (curthread->td_flags & TDF_NOFAULT) { 4407 rv = KERN_FAILURE_NOFAULT; 4408 goto done; 4409 } 4410 4411 /* 4412 * Make a new vm_map_backing + object, and place it 4413 * in the object chain. Note that no new references 4414 * have appeared -- one just moved from the map to 4415 * the new object. 4416 */ 4417 if (use_read_lock && vm_map_lock_upgrade(map)) { 4418 /* lost lock */ 4419 use_read_lock = 0; 4420 goto RetryLookup; 4421 } 4422 use_read_lock = 0; 4423 vm_map_entry_shadow(entry, 0); 4424 *wflags |= FW_DIDCOW; 4425 } else { 4426 /* 4427 * We're attempting to read a copy-on-write page -- 4428 * don't allow writes. 4429 */ 4430 prot &= ~VM_PROT_WRITE; 4431 } 4432 } 4433 4434 /* 4435 * Create an object if necessary. This code also handles 4436 * partitioning large entries to improve vm_fault performance. 4437 */ 4438 if (entry->ba.object == NULL && !map->system_map) { 4439 if (use_read_lock && vm_map_lock_upgrade(map)) { 4440 /* lost lock */ 4441 use_read_lock = 0; 4442 goto RetryLookup; 4443 } 4444 use_read_lock = 0; 4445 4446 /* 4447 * Partition large entries, giving each its own VM object, 4448 * to improve concurrent fault performance. This is only 4449 * applicable to userspace. 4450 */ 4451 if (map != &kernel_map && 4452 entry->maptype == VM_MAPTYPE_NORMAL && 4453 ((entry->start ^ entry->end) & ~MAP_ENTRY_PARTITION_MASK) && 4454 vm_map_partition_enable) { 4455 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 4456 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4457 ++mycpu->gd_cnt.v_intrans_coll; 4458 ++mycpu->gd_cnt.v_intrans_wait; 4459 vm_map_transition_wait(map, 0); 4460 goto RetryLookup; 4461 } 4462 vm_map_entry_partition(map, entry, vaddr, &count); 4463 } 4464 vm_map_entry_allocate_object(entry); 4465 } 4466 4467 /* 4468 * Return the object/offset from this entry. If the entry was 4469 * copy-on-write or empty, it has been fixed up. 4470 */ 4471 *bap = &entry->ba; 4472 4473 skip: 4474 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->ba.offset); 4475 4476 /* 4477 * Return whether this is the only map sharing this data. On 4478 * success we return with a read lock held on the map. On failure 4479 * we return with the map unlocked. 
4480 */ 4481 *out_prot = prot; 4482 done: 4483 if (rv == KERN_SUCCESS) { 4484 if (use_read_lock == 0) 4485 vm_map_lock_downgrade(map); 4486 } else if (use_read_lock) { 4487 vm_map_unlock_read(map); 4488 } else { 4489 vm_map_unlock(map); 4490 } 4491 if (count > 0) 4492 vm_map_entry_release(count); 4493 4494 return (rv); 4495 } 4496 4497 /* 4498 * Releases locks acquired by a vm_map_lookup() 4499 * (according to the handle returned by that lookup). 4500 * 4501 * No other requirements. 4502 */ 4503 void 4504 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count) 4505 { 4506 /* 4507 * Unlock the main-level map 4508 */ 4509 vm_map_unlock_read(map); 4510 if (count) 4511 vm_map_entry_release(count); 4512 } 4513 4514 static void 4515 vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry, 4516 vm_offset_t vaddr, int *countp) 4517 { 4518 vaddr &= ~MAP_ENTRY_PARTITION_MASK; 4519 vm_map_clip_start(map, entry, vaddr, countp); 4520 vaddr += MAP_ENTRY_PARTITION_SIZE; 4521 vm_map_clip_end(map, entry, vaddr, countp); 4522 } 4523 4524 /* 4525 * Quick hack, needs some help to make it more SMP friendly. 4526 */ 4527 void 4528 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock, 4529 vm_offset_t ran_beg, vm_offset_t ran_end) 4530 { 4531 struct vm_map_ilock *scan; 4532 4533 ilock->ran_beg = ran_beg; 4534 ilock->ran_end = ran_end; 4535 ilock->flags = 0; 4536 4537 spin_lock(&map->ilock_spin); 4538 restart: 4539 for (scan = map->ilock_base; scan; scan = scan->next) { 4540 if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) { 4541 scan->flags |= ILOCK_WAITING; 4542 ssleep(scan, &map->ilock_spin, 0, "ilock", 0); 4543 goto restart; 4544 } 4545 } 4546 ilock->next = map->ilock_base; 4547 map->ilock_base = ilock; 4548 spin_unlock(&map->ilock_spin); 4549 } 4550 4551 void 4552 vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock) 4553 { 4554 struct vm_map_ilock *scan; 4555 struct vm_map_ilock **scanp; 4556 4557 spin_lock(&map->ilock_spin); 4558 scanp = &map->ilock_base; 4559 while ((scan = *scanp) != NULL) { 4560 if (scan == ilock) { 4561 *scanp = ilock->next; 4562 spin_unlock(&map->ilock_spin); 4563 if (ilock->flags & ILOCK_WAITING) 4564 wakeup(ilock); 4565 return; 4566 } 4567 scanp = &scan->next; 4568 } 4569 spin_unlock(&map->ilock_spin); 4570 panic("vm_map_deinterlock: missing ilock!"); 4571 } 4572 4573 #include "opt_ddb.h" 4574 #ifdef DDB 4575 #include <ddb/ddb.h> 4576 4577 /* 4578 * Debugging only 4579 */ 4580 DB_SHOW_COMMAND(map, vm_map_print) 4581 { 4582 static int nlines; 4583 /* XXX convert args. */ 4584 vm_map_t map = (vm_map_t)addr; 4585 boolean_t full = have_addr; 4586 4587 vm_map_entry_t entry; 4588 4589 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4590 (void *)map, 4591 (void *)map->pmap, map->nentries, map->timestamp); 4592 nlines++; 4593 4594 if (!full && db_indent) 4595 return; 4596 4597 db_indent += 2; 4598 RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) { 4599 db_iprintf("map entry %p: start=%p, end=%p\n", 4600 (void *)entry, (void *)entry->start, (void *)entry->end); 4601 nlines++; 4602 { 4603 static char *inheritance_name[4] = 4604 {"share", "copy", "none", "donate_copy"}; 4605 4606 db_iprintf(" prot=%x/%x/%s", 4607 entry->protection, 4608 entry->max_protection, 4609 inheritance_name[(int)(unsigned char) 4610 entry->inheritance]); 4611 if (entry->wired_count != 0) 4612 db_printf(", wired"); 4613 } 4614 switch(entry->maptype) { 4615 case VM_MAPTYPE_SUBMAP: 4616 /* XXX no %qd in kernel. Truncate entry->ba.offset. 
*/ 4617 db_printf(", share=%p, offset=0x%lx\n", 4618 (void *)entry->ba.sub_map, 4619 (long)entry->ba.offset); 4620 nlines++; 4621 4622 db_indent += 2; 4623 vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map, 4624 full, 0, NULL); 4625 db_indent -= 2; 4626 break; 4627 case VM_MAPTYPE_NORMAL: 4628 case VM_MAPTYPE_VPAGETABLE: 4629 /* XXX no %qd in kernel. Truncate entry->ba.offset. */ 4630 db_printf(", object=%p, offset=0x%lx", 4631 (void *)entry->ba.object, 4632 (long)entry->ba.offset); 4633 if (entry->eflags & MAP_ENTRY_COW) 4634 db_printf(", copy (%s)", 4635 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4636 db_printf("\n"); 4637 nlines++; 4638 4639 if (entry->ba.object) { 4640 db_indent += 2; 4641 vm_object_print((db_expr_t)(intptr_t) 4642 entry->ba.object, 4643 full, 0, NULL); 4644 nlines += 4; 4645 db_indent -= 2; 4646 } 4647 break; 4648 case VM_MAPTYPE_UKSMAP: 4649 db_printf(", uksmap=%p, offset=0x%lx", 4650 (void *)entry->ba.uksmap, 4651 (long)entry->ba.offset); 4652 if (entry->eflags & MAP_ENTRY_COW) 4653 db_printf(", copy (%s)", 4654 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4655 db_printf("\n"); 4656 nlines++; 4657 break; 4658 default: 4659 break; 4660 } 4661 } 4662 db_indent -= 2; 4663 if (db_indent == 0) 4664 nlines = 0; 4665 } 4666 4667 /* 4668 * Debugging only 4669 */ 4670 DB_SHOW_COMMAND(procvm, procvm) 4671 { 4672 struct proc *p; 4673 4674 if (have_addr) { 4675 p = (struct proc *) addr; 4676 } else { 4677 p = curproc; 4678 } 4679 4680 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4681 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4682 (void *)vmspace_pmap(p->p_vmspace)); 4683 4684 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 4685 } 4686 4687 #endif /* DDB */ 4688
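
/*
 * Illustrative note (assumes a kernel built with the DDB option): the
 * debugger commands defined above are entered as
 *
 *	db> show map <address of a struct vm_map>
 *	db> show procvm [<address of a struct proc>]
 *
 * "show procvm" without an address dumps the vmspace and map of curproc.
 */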