/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/kern_syscall.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remain compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
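/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * sources): the usual pattern for modifying a range of an existing map is
 * to reserve map entries up front, lock the map, clip/adjust the entries
 * covering [start, end), then unlock and release the unused reservation:
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... look up, clip, and adjust the entries covering [start, end) ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */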
static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
static void vmspace_dtor(void *obj, void *privdata);
static void vmspace_terminate(struct vmspace *vm, int final);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry");
static struct objcache *vmspace_cache;

/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define MAPENTRYBSP_CACHE	(MAXCPU+1)
#define MAPENTRYAP_CACHE	8

/*
 * Partitioning threaded programs with large anonymous memory areas can
 * improve concurrent fault performance.
 */
#define MAP_ENTRY_PARTITION_SIZE	((vm_offset_t)(32 * 1024 * 1024))
#define MAP_ENTRY_PARTITION_MASK	(MAP_ENTRY_PARTITION_SIZE - 1)

#define VM_MAP_ENTRY_WITHIN_PARTITION(entry)	\
	((((entry)->ba.start ^ (entry)->ba.end) & ~MAP_ENTRY_PARTITION_MASK) == 0)

static struct vm_zone mapentzone_store;
__read_mostly static vm_zone_t mapentzone;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];

static int randomize_mmap;
SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
    "Randomize mmap offsets");
static int vm_map_relock_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
    &vm_map_relock_enable, 0, "insert pop pgtable optimization");
static int vm_map_partition_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW,
    &vm_map_partition_enable, 0, "Break up larger vm_map_entry's");
static int vm_map_backing_limit = 5;
SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW,
    &vm_map_backing_limit, 0, "ba.backing_ba link depth");
static int vm_map_backing_shadow_test = 1;
SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW,
    &vm_map_backing_shadow_test, 0, "ba.object shadow test");

static void vmspace_drop_notoken(struct vmspace *vm);
static void vm_map_entry_shadow(vm_map_entry_t entry);
static vm_map_entry_t vm_map_entry_create(int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void vm_map_entry_dispose_ba (vm_map_entry_t entry, vm_map_backing_t ba);
static void vm_map_backing_replicated(vm_map_t map,
		vm_map_entry_t entry, int flags);
static void vm_map_backing_adjust_start(vm_map_entry_t entry,
		vm_ooffset_t start);
static void vm_map_backing_adjust_end(vm_map_entry_t entry,
		vm_ooffset_t end);
static void vm_map_backing_attach (vm_map_entry_t entry, vm_map_backing_t ba);
static void vm_map_backing_detach (vm_map_entry_t entry, vm_map_backing_t ba);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *countp, int flags);
static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
		vm_offset_t vaddr, int *countp);

#define MAP_BACK_CLIPPED	0x0001
#define MAP_BACK_BASEOBJREFD	0x0002

/*
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * Map and entry structures are allocated from the general purpose
 * memory pool with some exceptions:
 *
 *	- The kernel map is allocated statically.
 *	- Initial kernel map entries are allocated out of a static pool.
 *	- We must set ZONE_SPECIAL here or the early boot code can get
 *	  stuck if there are >63 cores.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 *
 * Called from the low level boot code only.
 */
void
vm_map_startup(void)
{
        mapentzone = &mapentzone_store;
        zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
                  map_entry_init, MAX_MAPENT);
        mapentzone_store.zflags |= ZONE_SPECIAL;
}

/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
        vmspace_cache = objcache_create_mbacked(M_VMSPACE,
                                                sizeof(struct vmspace),
                                                0, ncpus * 4,
                                                vmspace_ctor, vmspace_dtor,
                                                NULL);
        zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
        pmap_init2();
        vm_object_init2();
}

/*
 * objcache support.  We leave the pmap root cached as long as possible
 * for performance reasons.
 */
static
boolean_t
vmspace_ctor(void *obj, void *privdata, int ocflags)
{
        struct vmspace *vm = obj;

        bzero(vm, sizeof(*vm));
        vm->vm_refcnt = VM_REF_DELETED;

        return 1;
}

static
void
vmspace_dtor(void *obj, void *privdata)
{
        struct vmspace *vm = obj;

        KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
        pmap_puninit(vmspace_pmap(vm));
}

/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->ba.start is address, and the only field which must be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
        if (a->ba.start < b->ba.start)
                return(-1);
        else if (a->ba.start > b->ba.start)
                return(1);
        return(0);
}

/*
 * Initialize vmspace ref/hold counts for vmspace0.  There is a holdcnt
 * for every refcnt.
 */
void
vmspace_initrefs(struct vmspace *vm)
{
        vm->vm_refcnt = 1;
        vm->vm_holdcnt = 1;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * Returns a referenced vmspace.
 *
 * No requirements.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
        struct vmspace *vm;

        vm = objcache_get(vmspace_cache, M_WAITOK);

        bzero(&vm->vm_startcopy,
              (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
        vm_map_init(&vm->vm_map, min, max, NULL);       /* initializes token */

        /*
         * NOTE: hold acquires the token for safety.
         *
         * On return vmspace is referenced (refs=1, hold=1).  That is,
         * each refcnt also has a holdcnt.  There can be additional holds
         * (holdcnt) above and beyond the refcnt.  Finalization is handled in
         * two stages, one on refs 1->0, and the second on hold 1->0.
         */
        KKASSERT(vm->vm_holdcnt == 0);
        KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
        vmspace_initrefs(vm);
        vmspace_hold(vm);
        pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
        vm->vm_map.pmap = vmspace_pmap(vm);     /* XXX */
        vm->vm_shm = NULL;
        vm->vm_flags = 0;
        cpu_vmspace_alloc(vm);
        vmspace_drop(vm);

        return (vm);
}
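/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * sources): vmspace_alloc() returns the vmspace already referenced, so a
 * caller setting up a new user address space pairs it with vmspace_rel()
 * (or vmspace_relexit()/vmspace_exitfree() on process teardown):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
 *	... populate vm->vm_map ...
 *	vmspace_rel(vm);
 */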
/*
 * NOTE: Can return 0 if the vmspace is exiting.
 */
int
vmspace_getrefs(struct vmspace *vm)
{
        int32_t n;

        n = vm->vm_refcnt;
        cpu_ccfence();
        if (n & VM_REF_DELETED)
                n = -1;
        return n;
}

void
vmspace_hold(struct vmspace *vm)
{
        atomic_add_int(&vm->vm_holdcnt, 1);
        lwkt_gettoken(&vm->vm_map.token);
}

/*
 * Drop with final termination interlock.
 */
void
vmspace_drop(struct vmspace *vm)
{
        lwkt_reltoken(&vm->vm_map.token);
        vmspace_drop_notoken(vm);
}

static void
vmspace_drop_notoken(struct vmspace *vm)
{
        if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
                if (vm->vm_refcnt & VM_REF_DELETED)
                        vmspace_terminate(vm, 1);
        }
}

/*
 * A vmspace object must not be in a terminated state to be able to obtain
 * additional refs on it.
 *
 * These are official references to the vmspace, the count is used to check
 * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
 *
 * XXX we need to combine hold & ref together into one 64-bit field to allow
 * holds to prevent stage-1 termination.
 */
void
vmspace_ref(struct vmspace *vm)
{
        uint32_t n;

        atomic_add_int(&vm->vm_holdcnt, 1);
        n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
        KKASSERT((n & VM_REF_DELETED) == 0);
}

/*
 * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
 * termination of the vmspace.  Then, on the final drop of the hold we
 * will do stage-2 final termination.
 */
void
vmspace_rel(struct vmspace *vm)
{
        uint32_t n;

        /*
         * Drop refs.  Each ref also has a hold which is also dropped.
         *
         * When refs hits 0 compete to get the VM_REF_DELETED flag (holds
         * prevent finalization) to start termination processing.
         * Finalization occurs when the last hold count drops to 0.
         */
        n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
        while (n == 0) {
                if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
                        vmspace_terminate(vm, 0);
                        break;
                }
                n = vm->vm_refcnt;
                cpu_ccfence();
        }
        vmspace_drop_notoken(vm);
}

/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been reaped.
 *
 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
 * to prevent stage-2 until the process is reaped.  Note the order of
 * operations, we must hold first.
 *
 * No requirements.
 */
void
vmspace_relexit(struct vmspace *vm)
{
        atomic_add_int(&vm->vm_holdcnt, 1);
        vmspace_rel(vm);
}

/*
 * Called during reap to disconnect the remainder of the vmspace from
 * the process.  On the hold drop the vmspace termination is finalized.
 *
 * No requirements.
 */
void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        vmspace_drop_notoken(vm);
}
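/*
 * Illustrative sketch only (hypothetical accessors, not part of the original
 * sources): code that merely needs to inspect somebody else's vmspace uses
 * hold/drop, which blocks stage-2 destruction without extending the official
 * lifetime, while code that actually shares the vmspace uses ref/rel:
 *
 *	vmspace_hold(vm);		// also acquires vm_map.token
 *	... inspect vm->vm_map ...
 *	vmspace_drop(vm);
 *
 *	vmspace_ref(vm);		// official reference (e.g. sharing)
 *	...
 *	vmspace_rel(vm);		// may trigger stage-1 termination
 */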
/*
 * Called in two cases:
 *
 * (1) When the last refcnt is dropped and the vmspace becomes inactive,
 *     called with final == 0.  refcnt will be (u_int)-1 at this point,
 *     and holdcnt will still be non-zero.
 *
 * (2) When holdcnt becomes 0, called with final == 1.  There should no
 *     longer be anyone with access to the vmspace.
 *
 * VMSPACE_EXIT1 flags the primary deactivation
 * VMSPACE_EXIT2 flags the last reap
 */
static void
vmspace_terminate(struct vmspace *vm, int final)
{
        int count;

        lwkt_gettoken(&vm->vm_map.token);
        if (final == 0) {
                KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
                vm->vm_flags |= VMSPACE_EXIT1;

                /*
                 * Get rid of most of the resources.  Leave the kernel pmap
                 * intact.
                 *
                 * If the pmap does not contain wired pages we can bulk-delete
                 * the pmap as a performance optimization before removing the
                 * related mappings.
                 *
                 * If the pmap contains wired pages we cannot do this
                 * pre-optimization because currently vm_fault_unwire()
                 * expects the pmap pages to exist and will not decrement
                 * p->wire_count if they do not.
                 */
                shmexit(vm);
                if (vmspace_pmap(vm)->pm_stats.wired_count) {
                        vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
                                      VM_MAX_USER_ADDRESS);
                        pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
                                          VM_MAX_USER_ADDRESS);
                } else {
                        pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
                                          VM_MAX_USER_ADDRESS);
                        vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
                                      VM_MAX_USER_ADDRESS);
                }
                lwkt_reltoken(&vm->vm_map.token);
        } else {
                KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
                KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);

                /*
                 * Get rid of remaining basic resources.
                 */
                vm->vm_flags |= VMSPACE_EXIT2;
                shmexit(vm);

                count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
                vm_map_lock(&vm->vm_map);
                cpu_vmspace_free(vm);

                /*
                 * Lock the map, to wait out all other references to it.
                 * Delete all of the mappings and pages they hold, then call
                 * the pmap module to reclaim anything left.
                 */
                vm_map_delete(&vm->vm_map,
                              vm_map_min(&vm->vm_map),
                              vm_map_max(&vm->vm_map),
                              &count);
                vm_map_unlock(&vm->vm_map);
                vm_map_entry_release(count);

                pmap_release(vmspace_pmap(vm));
                lwkt_reltoken(&vm->vm_map.token);
                objcache_put(vmspace_cache, vm);
        }
}

/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_swap_count(struct vmspace *vm)
{
        vm_map_t map = &vm->vm_map;
        vm_map_entry_t cur;
        vm_object_t object;
        vm_offset_t count = 0;
        vm_offset_t n;

        vmspace_hold(vm);

        RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
                switch(cur->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        if ((object = cur->ba.object) == NULL)
                                break;
                        if (object->swblock_count) {
                                n = (cur->ba.end - cur->ba.start) / PAGE_SIZE;
                                count += object->swblock_count *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                        break;
                default:
                        break;
                }
        }
        vmspace_drop(vm);

        return(count);
}
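/*
 * Illustrative arithmetic for the proportional charge above (numbers are
 * hypothetical): an object of size 1000 pages with swblock_count == 4
 * covers 4 * SWAP_META_PAGES swapped pages; a map entry mapping 250 pages
 * of that object is charged 4 * SWAP_META_PAGES * 250 / 1000 + 1 pages
 * of swap.
 */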
/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_anonymous_count(struct vmspace *vm)
{
        vm_map_t map = &vm->vm_map;
        vm_map_entry_t cur;
        vm_object_t object;
        vm_offset_t count = 0;

        vmspace_hold(vm);
        RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
                switch(cur->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        if ((object = cur->ba.object) == NULL)
                                break;
                        if (object->type != OBJT_DEFAULT &&
                            object->type != OBJT_SWAP) {
                                break;
                        }
                        count += object->resident_page_count;
                        break;
                default:
                        break;
                }
        }
        vmspace_drop(vm);

        return(count);
}

/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure.  The pmap is initialized elsewhere.
 *
 * No requirements.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr,
            pmap_t pmap)
{
        RB_INIT(&map->rb_root);
        spin_init(&map->ilock_spin, "ilock");
        map->ilock_base = NULL;
        map->nentries = 0;
        map->size = 0;
        map->system_map = 0;
        vm_map_min(map) = min_addr;
        vm_map_max(map) = max_addr;
        map->pmap = pmap;
        map->timestamp = 0;
        map->flags = 0;
        bzero(&map->freehint, sizeof(map->freehint));
        lwkt_token_init(&map->token, "vm_map");
        lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
}

/*
 * Find the first possible free address for the specified request length.
 * Returns 0 if we don't have one cached.
 */
static
vm_offset_t
vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
{
        vm_map_freehint_t *scan;

        scan = &map->freehint[0];
        while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
                if (scan->length == length && scan->align == align)
                        return(scan->start);
                ++scan;
        }
        return 0;
}

/*
 * Unconditionally set the freehint.  Called by vm_map_findspace() after
 * it finds an address.  This will help us iterate optimally on the next
 * similar findspace.
 */
static
void
vm_map_freehint_update(vm_map_t map, vm_offset_t start,
                       vm_size_t length, vm_size_t align)
{
        vm_map_freehint_t *scan;

        scan = &map->freehint[0];
        while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
                if (scan->length == length && scan->align == align) {
                        scan->start = start;
                        return;
                }
                ++scan;
        }
        scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
        scan->start = start;
        scan->align = align;
        scan->length = length;
        ++map->freehint_newindex;
}

/*
 * Update any existing freehints (for any alignment), for the hole we just
 * added.
 */
static
void
vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
{
        vm_map_freehint_t *scan;

        scan = &map->freehint[0];
        while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
                if (scan->length <= length && scan->start > start)
                        scan->start = start;
                ++scan;
        }
}
/*
 * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting
 * object in the entry for COW faults.
 *
 * The entire chain including entry->ba (prior to inserting the fronting
 * object) essentially becomes set in stone... elements of it can be paged
 * in or out, but cannot be further modified.
 *
 * NOTE: If we do not optimize the backing chain then a unique copy is not
 *	 needed.  Note, however, that because portions of the chain are
 *	 shared across pmaps we cannot make any changes to the vm_map_backing
 *	 elements themselves.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * If addref is non-zero an additional reference is added to the returned
 * entry.  This mechanic exists because the additional reference might have
 * to be added atomically and not after return to prevent a premature
 * collapse.  XXX currently there is no collapse code.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
static
void
vm_map_entry_shadow(vm_map_entry_t entry)
{
        vm_map_backing_t ba;
        vm_size_t length;
        vm_object_t source;
        vm_object_t result;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
                length = 0x7FFFFFFF;
        else
                length = atop(entry->ba.end - entry->ba.start);

        /*
         * Don't create the new object if the old object isn't shared.
         * This case occurs quite often when programs fork/exec/wait.
         *
         * Caller ensures source exists (all backing_ba's must have objects),
         * typically indirectly by virtue of the NEEDS_COPY flag being set.
         * We have a ref on source by virtue of the entry and do not need
         * to lock it to do this test.
         */
        source = entry->ba.object;
        KKASSERT(source);

        if (source->type != OBJT_VNODE) {
                if (source->ref_count == 1 &&
                    source->handle == NULL &&
                    (source->type == OBJT_DEFAULT ||
                     source->type == OBJT_SWAP)) {
                        goto done;
                }
        }
        ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */
        vm_object_hold_shared(source);

        /*
         * Once it becomes part of a backing_ba chain it can wind up anywhere,
         * drop the ONEMAPPING flag now.
         */
        vm_object_clear_flag(source, OBJ_ONEMAPPING);

        /*
         * Allocate a new object with the given length.  The new object
         * is returned referenced but we may have to add another one.
         * If we are adding a second reference we must clear OBJ_ONEMAPPING.
         * (typically because the caller is about to clone a vm_map_entry).
         *
         * The source object currently has an extra reference to prevent
         * collapses into it while we mess with its shadow list, which
         * we will remove later in this routine.
         *
         * The target object may require a second reference if asked for one
         * by the caller.
         */
        result = vm_object_allocate_hold(OBJT_DEFAULT, length);
        if (result == NULL)
                panic("vm_object_shadow: no object for shadowing");

        /*
         * The new object shadows the source object.
         *
         * Try to optimize the result object's page color when shadowing
         * in order to maintain page coloring consistency in the combined
         * shadowed object.
         *
         * The source object is moved to ba, retaining its existing ref-count.
         * No additional ref is needed.
         *
         * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
         */
        vm_map_backing_detach(entry, &entry->ba);
        *ba = entry->ba;                /* previous ba */
        entry->ba.object = result;      /* new ba (at head of entry) */
        entry->ba.backing_ba = ba;
        entry->ba.backing_count = ba->backing_count + 1;
        entry->ba.offset = 0;

        /* cpu localization twist */
        result->pg_color = vm_quickcolor();

        vm_map_backing_attach(entry, &entry->ba);
        vm_map_backing_attach(entry, ba);

        /*
         * Adjust the return storage.  Drop the ref on source before
         * returning.
         */
        vm_object_drop(result);
        vm_object_drop(source);
done:
        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}
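/*
 * Illustrative sketch of the effect of vm_map_entry_shadow() above (field
 * values are examples only):
 *
 *	before:	entry->ba = { object = A, offset = X, backing_ba = NULL }
 *
 *	after:	entry->ba = { object = NEW, offset = 0, backing_ba = ba }
 *		ba        = { object = A,   offset = X, ... }  (old entry->ba)
 *
 * Subsequent COW faults populate NEW while A (and anything deeper in the
 * chain) is treated as read-only backing store.
 */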
/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
        vm_object_t obj;

        /*
         * ba.offset is NOT cumulatively added in the backing_ba scan like
         * it was in the old object chain, so we can assign whatever offset
         * we like to the new object.
         *
         * For now assign a value of 0 to make debugging object sizes
         * easier.
         */
        entry->ba.offset = 0;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                /* XXX */
                obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);
        } else {
                obj = vm_object_allocate(OBJT_DEFAULT,
                                         atop(entry->ba.end - entry->ba.start) +
                                         entry->ba.offset);
        }
        entry->ba.object = obj;
        vm_map_backing_attach(entry, &entry->ba);
}

/*
 * Set an initial negative count so the first attempt to reserve
 * space preloads a bunch of vm_map_entry's for this cpu.  Also
 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 * map a new page for vm_map_entry structures.  SMP systems are
 * particularly sensitive.
 *
 * This routine is called in early boot so we cannot just call
 * vm_map_entry_reserve().
 *
 * Called from the low level boot code only (for each cpu)
 *
 * WARNING! Take care not to have too-big a static/BSS structure here
 *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
 *	    can get blown out by the kernel plus the initrd image.
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
        vm_map_entry_t entry;
        int count;
        int i;

        atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2);
        if (gd->gd_cpuid == 0) {
                entry = &cpu_map_entry_init_bsp[0];
                count = MAPENTRYBSP_CACHE;
        } else {
                entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
                count = MAPENTRYAP_CACHE;
        }
        for (i = 0; i < count; ++i, ++entry) {
                MAPENT_FREELIST(entry) = gd->gd_vme_base;
                gd->gd_vme_base = entry;
        }
}
/*
 * Reserves vm_map_entry structures so code later-on can manipulate
 * map_entry structures within a locked map without blocking trying
 * to allocate a new vm_map_entry.
 *
 * No requirements.
 *
 * WARNING! We must not decrement gd_vme_avail until after we have
 *	    ensured that sufficient entries exist, otherwise we can
 *	    get into an endless call recursion in the zalloc code
 *	    itself.
 */
int
vm_map_entry_reserve(int count)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;

        /*
         * Make sure we have enough structures in gd_vme_base to handle
         * the reservation request.
         *
         * Use a critical section to protect against VM faults.  It might
         * not be needed, but we have to be careful here.
         */
        if (gd->gd_vme_avail < count) {
                crit_enter();
                while (gd->gd_vme_avail < count) {
                        entry = zalloc(mapentzone);
                        MAPENT_FREELIST(entry) = gd->gd_vme_base;
                        gd->gd_vme_base = entry;
                        atomic_add_int(&gd->gd_vme_avail, 1);
                }
                crit_exit();
        }
        atomic_add_int(&gd->gd_vme_avail, -count);

        return(count);
}

/*
 * Releases previously reserved vm_map_entry structures that were not
 * used.  If we have too much junk in our per-cpu cache clean some of
 * it out.
 *
 * No requirements.
 */
void
vm_map_entry_release(int count)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;
        vm_map_entry_t efree;

        count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count;
        if (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
                efree = NULL;
                crit_enter();
                while (gd->gd_vme_avail > MAP_RESERVE_HYST) {
                        entry = gd->gd_vme_base;
                        KKASSERT(entry != NULL);
                        gd->gd_vme_base = MAPENT_FREELIST(entry);
                        atomic_add_int(&gd->gd_vme_avail, -1);
                        MAPENT_FREELIST(entry) = efree;
                        efree = entry;
                }
                crit_exit();
                while ((entry = efree) != NULL) {
                        efree = MAPENT_FREELIST(efree);
                        zfree(mapentzone, entry);
                }
        }
}

/*
 * Reserve map entry structures for use in kernel_map itself.  These
 * entries have *ALREADY* been reserved on a per-cpu basis when the map
 * was inited.  This function is used by zalloc() to avoid a recursion
 * when zalloc() itself needs to allocate additional kernel memory.
 *
 * This function works like the normal reserve but does not load the
 * vm_map_entry cache (because that would result in an infinite
 * recursion).  Note that gd_vme_avail may go negative.  This is expected.
 *
 * Any caller of this function must be sure to renormalize after
 * potentially eating entries to ensure that the reserve supply
 * remains intact.
 *
 * No requirements.
 */
int
vm_map_entry_kreserve(int count)
{
        struct globaldata *gd = mycpu;

        atomic_add_int(&gd->gd_vme_avail, -count);
        KASSERT(gd->gd_vme_base != NULL,
                ("no reserved entries left, gd_vme_avail = %d",
                 gd->gd_vme_avail));
        return(count);
}

/*
 * Release previously reserved map entries for kernel_map.  We do not
 * attempt to clean up like the normal release function as this would
 * cause an unnecessary (but probably not fatal) deep procedure call.
 *
 * No requirements.
 */
void
vm_map_entry_krelease(int count)
{
        struct globaldata *gd = mycpu;

        atomic_add_int(&gd->gd_vme_avail, count);
}
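/*
 * Illustrative sketch only (hypothetical kmem/zalloc-internal path, not
 * part of the original sources): code which may itself be reached from
 * zalloc() must use the kreserve/krelease variants above so the reserve
 * pass does not recurse back into zalloc():
 *
 *	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(&kernel_map);
 *	... manipulate kernel_map entries ...
 *	vm_map_unlock(&kernel_map);
 *	vm_map_entry_krelease(count);
 */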
/*
 * Allocates a VM map entry for insertion.  No entry fields are filled in.
 *
 * The entries should have previously been reserved.  The reservation count
 * is tracked in (*countp).
 *
 * No requirements.
 */
static vm_map_entry_t
vm_map_entry_create(int *countp)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;

        KKASSERT(*countp > 0);
        --*countp;
        crit_enter();
        entry = gd->gd_vme_base;
        KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
        gd->gd_vme_base = MAPENT_FREELIST(entry);
        crit_exit();

        return(entry);
}

/*
 * Attach and detach backing store elements
 */
static void
vm_map_backing_attach(vm_map_entry_t entry, vm_map_backing_t ba)
{
        vm_object_t obj;

        switch(entry->maptype) {
        case VM_MAPTYPE_VPAGETABLE:
        case VM_MAPTYPE_NORMAL:
                obj = ba->object;
                lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
                TAILQ_INSERT_TAIL(&obj->backing_list, ba, entry);
                lockmgr(&obj->backing_lk, LK_RELEASE);
                break;
        case VM_MAPTYPE_UKSMAP:
                ba->uksmap(ba, UKSMAPOP_ADD, entry->aux.dev, NULL);
                break;
        }
}

static void
vm_map_backing_detach(vm_map_entry_t entry, vm_map_backing_t ba)
{
        vm_object_t obj;

        switch(entry->maptype) {
        case VM_MAPTYPE_VPAGETABLE:
        case VM_MAPTYPE_NORMAL:
                obj = ba->object;
                lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
                TAILQ_REMOVE(&obj->backing_list, ba, entry);
                lockmgr(&obj->backing_lk, LK_RELEASE);
                break;
        case VM_MAPTYPE_UKSMAP:
                ba->uksmap(ba, UKSMAPOP_REM, entry->aux.dev, NULL);
                break;
        }
}

/*
 * Dispose of the dynamically allocated backing_ba chain associated
 * with a vm_map_entry.
 *
 * We decrement the (possibly shared) element and kfree() on the
 * 1->0 transition.  We only iterate to the next backing_ba when
 * the previous one went through a 1->0 transition.
 *
 * These can only be normal vm_object based backings.
 */
static void
vm_map_entry_dispose_ba(vm_map_entry_t entry, vm_map_backing_t ba)
{
        vm_map_backing_t next;

        while (ba) {
                if (ba->map_object) {
                        vm_map_backing_detach(entry, ba);
                        vm_object_deallocate(ba->object);
                }
                next = ba->backing_ba;
                kfree(ba, M_MAP_BACKING);
                ba = next;
        }
}

/*
 * Dispose of a vm_map_entry that is no longer being referenced.
 *
 * No requirements.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        struct globaldata *gd = mycpu;

        /*
         * Dispose of the base object and the backing link.
         */
        switch(entry->maptype) {
        case VM_MAPTYPE_NORMAL:
        case VM_MAPTYPE_VPAGETABLE:
                if (entry->ba.map_object) {
                        vm_map_backing_detach(entry, &entry->ba);
                        vm_object_deallocate(entry->ba.object);
                }
                break;
        case VM_MAPTYPE_SUBMAP:
                break;
        case VM_MAPTYPE_UKSMAP:
                vm_map_backing_detach(entry, &entry->ba);
                break;
        default:
                break;
        }
        vm_map_entry_dispose_ba(entry, entry->ba.backing_ba);

        /*
         * Cleanup for safety.
         */
        entry->ba.backing_ba = NULL;
        entry->ba.object = NULL;
        entry->ba.offset = 0;

        ++*countp;
        crit_enter();
        MAPENT_FREELIST(entry) = gd->gd_vme_base;
        gd->gd_vme_base = entry;
        crit_exit();
}

/*
 * Insert/remove entries from maps.
 *
 * The related map must be exclusively locked.
 * The caller must hold map->token
 * No other requirements.
 */
static __inline void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
        ASSERT_VM_MAP_LOCKED(map);

        map->nentries++;
        if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
                panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        ASSERT_VM_MAP_LOCKED(map);

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                panic("vm_map_entry_unlink: attempt to mess with "
                      "locked entry! %p", entry);
        }
        vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
        map->nentries--;
}

/*
 * Finds the map entry containing (or immediately preceding) the specified
 * address in the given map.  The entry is returned in (*entry).
 *
 * The boolean result indicates whether the address is actually contained
 * in the map.
 *
 * The related map must be locked.
 * No other requirements.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
{
        vm_map_entry_t tmp;
        vm_map_entry_t last;

        ASSERT_VM_MAP_LOCKED(map);

        /*
         * Locate the record from the top of the tree.  'last' tracks the
         * closest prior record and is returned if no match is found, which
         * in binary tree terms means tracking the most recent right-branch
         * taken.  If there is no prior record, *entry is set to NULL.
         */
        last = NULL;
        tmp = RB_ROOT(&map->rb_root);

        while (tmp) {
                if (address >= tmp->ba.start) {
                        if (address < tmp->ba.end) {
                                *entry = tmp;
                                return(TRUE);
                        }
                        last = tmp;
                        tmp = RB_RIGHT(tmp, rb_entry);
                } else {
                        tmp = RB_LEFT(tmp, rb_entry);
                }
        }
        *entry = last;
        return (FALSE);
}
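/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * sources): typical use of vm_map_lookup_entry() with the map locked:
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr lies within [entry->ba.start, entry->ba.end)
 *	} else {
 *		// addr is not mapped; 'entry' is the closest preceding
 *		// entry, or NULL if addr precedes the first entry
 *	}
 */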
/*
 * Inserts the given whole VM object into the target map at the specified
 * address range.  The object's size should match that of the address range.
 *
 * The map must be exclusively locked.
 * The object must be held.
 * The caller must have reserved sufficient vm_map_entry structures.
 *
 * If object is non-NULL, ref count must be bumped by caller prior to
 * making call to account for the new entry.  XXX API is a bit messy.
 */
int
vm_map_insert(vm_map_t map, int *countp,
              void *map_object, void *map_aux,
              vm_ooffset_t offset, void *aux_info,
              vm_offset_t start, vm_offset_t end,
              vm_maptype_t maptype, vm_subsys_t id,
              vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t next;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;
        vm_object_t object;
        int must_drop = 0;

        if (maptype == VM_MAPTYPE_UKSMAP)
                object = NULL;
        else
                object = map_object;

        ASSERT_VM_MAP_LOCKED(map);
        if (object)
                ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

        /*
         * Check that the start and end points are not bogus.
         */
        if ((start < vm_map_min(map)) || (end > vm_map_max(map)) ||
            (start >= end)) {
                return (KERN_INVALID_ADDRESS);
        }

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);
        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if (prev_entry)
                next = vm_map_rb_tree_RB_NEXT(prev_entry);
        else
                next = RB_MIN(vm_map_rb_tree, &map->rb_root);
        if (next && next->ba.start < end)
                return (KERN_NO_SPACE);

        protoeflags = 0;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;
        if (cow & MAP_IS_STACK)
                protoeflags |= MAP_ENTRY_STACK;
        if (cow & MAP_IS_KSTACK)
                protoeflags |= MAP_ENTRY_KSTACK;

        lwkt_gettoken(&map->token);

        if (object) {
                ;
        } else if (prev_entry &&
                   (prev_entry->eflags == protoeflags) &&
                   (prev_entry->ba.end == start) &&
                   (prev_entry->wired_count == 0) &&
                   (prev_entry->id == id) &&
                   prev_entry->maptype == maptype &&
                   maptype == VM_MAPTYPE_NORMAL &&
                   prev_entry->ba.backing_ba == NULL &&  /* not backed */
                   ((prev_entry->ba.object == NULL) ||
                    vm_object_coalesce(prev_entry->ba.object,
                                OFF_TO_IDX(prev_entry->ba.offset),
                                (vm_size_t)(prev_entry->ba.end - prev_entry->ba.start),
                                (vm_size_t)(end - prev_entry->ba.end)))) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->ba.end);
                        vm_map_backing_adjust_end(prev_entry, end);
                        vm_map_simplify_entry(map, prev_entry, countp);
                        lwkt_reltoken(&map->token);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->ba.object;
                offset = prev_entry->ba.offset +
                         (prev_entry->ba.end - prev_entry->ba.start);
                if (object) {
                        vm_object_hold(object);
                        vm_object_lock_swap(); /* map->token order */
                        vm_object_reference_locked(object);
                        map_object = object;
                        must_drop = 1;
                }
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */
        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(countp);
        new_entry->ba.pmap = map->pmap;
        new_entry->ba.start = start;
        new_entry->ba.end = end;
        new_entry->id = id;

        new_entry->maptype = maptype;
        new_entry->eflags = protoeflags;
        new_entry->aux.master_pde = 0;          /* in case size is different */
        new_entry->aux.map_aux = map_aux;
        new_entry->ba.map_object = map_object;
        new_entry->ba.backing_ba = NULL;
        new_entry->ba.backing_count = 0;
        new_entry->ba.offset = offset;
        new_entry->ba.aux_info = aux_info;
        new_entry->ba.flags = 0;
        new_entry->ba.pmap = map->pmap;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        /*
         * Insert the new entry into the list
         */
        vm_map_backing_replicated(map, new_entry, MAP_BACK_BASEOBJREFD);
        vm_map_entry_link(map, new_entry);
        map->size += new_entry->ba.end - new_entry->ba.start;

        /*
         * Don't worry about updating freehint[] when inserting, allow
         * addresses to be lower than the actual first free spot.
         */
#if 0
        /*
         * Temporarily removed to avoid MAP_STACK panic, due to
         * MAP_STACK being a huge hack.  Will be added back in
         * when MAP_STACK (and the user stack mapping) is fixed.
         */
        /*
         * It may be possible to simplify the entry
         */
        vm_map_simplify_entry(map, new_entry, countp);
#endif

        /*
         * Try to pre-populate the page table.  Mappings governed by virtual
         * page tables cannot be prepopulated without a lot of work, so
         * don't try.
         */
        if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
            maptype != VM_MAPTYPE_VPAGETABLE &&
            maptype != VM_MAPTYPE_UKSMAP) {
                int dorelock = 0;
                if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
                        dorelock = 1;
                        vm_object_lock_swap();
                        vm_object_drop(object);
                }
                pmap_object_init_pt(map->pmap, new_entry,
                                    new_entry->ba.start,
                                    new_entry->ba.end - new_entry->ba.start,
                                    cow & MAP_PREFAULT_PARTIAL);
                if (dorelock) {
                        vm_object_hold(object);
                        vm_object_lock_swap();
                }
        }
        lwkt_reltoken(&map->token);
        if (must_drop)
                vm_object_drop(object);

        return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 *
 * The map must be exclusively locked.
 * No other requirements.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
                 vm_size_t align, int flags, vm_offset_t *addr)
{
        vm_map_entry_t entry;
        vm_map_entry_t tmp;
        vm_offset_t hole_start;
        vm_offset_t end;
        vm_offset_t align_mask;

        if (start < vm_map_min(map))
                start = vm_map_min(map);
        if (start > vm_map_max(map))
                return (1);

        /*
         * If the alignment is not a power of 2 we will have to use
         * a mod/division, set align_mask to a special value.
         */
        if ((align | (align - 1)) + 1 != (align << 1))
                align_mask = (vm_offset_t)-1;
        else
                align_mask = align - 1;

        /*
         * Use freehint to adjust the start point, hopefully reducing
         * the iteration to O(1).
         */
        hole_start = vm_map_freehint_find(map, length, align);
        if (start < hole_start)
                start = hole_start;
        if (vm_map_lookup_entry(map, start, &tmp))
                start = tmp->ba.end;
        entry = tmp;    /* may be NULL */

        /*
         * Look through the rest of the map, trying to fit a new region in the
         * gap between existing regions, or after the very last region.
         */
        for (;;) {
                /*
                 * Adjust the proposed start by the requested alignment,
                 * be sure that we didn't wrap the address.
                 */
                if (align_mask == (vm_offset_t)-1)
                        end = roundup(start, align);
                else
                        end = (start + align_mask) & ~align_mask;
                if (end < start)
                        return (1);
                start = end;

                /*
                 * Find the end of the proposed new region.  Be sure we didn't
                 * go beyond the end of the map, or wrap around the address.
                 * Then check to see if this is the last entry or if the
                 * proposed end fits in the gap between this and the next
                 * entry.
                 */
                end = start + length;
                if (end > vm_map_max(map) || end < start)
                        return (1);

                /*
                 * Locate the next entry, we can stop if this is the
                 * last entry (we know we are in-bounds so that would
                 * be a success).
                 */
                if (entry)
                        entry = vm_map_rb_tree_RB_NEXT(entry);
                else
                        entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
                if (entry == NULL)
                        break;

                /*
                 * Determine if the proposed area would overlap the
                 * next entry.
                 *
                 * When matching against a STACK entry, only allow the
                 * memory map to intrude on the ungrown portion of the
                 * STACK entry when MAP_TRYFIXED is set.
                 */
                if (entry->ba.start >= end) {
                        if ((entry->eflags & MAP_ENTRY_STACK) == 0)
                                break;
                        if (flags & MAP_TRYFIXED)
                                break;
                        if (entry->ba.start - entry->aux.avail_ssize >= end)
                                break;
                }
                start = entry->ba.end;
        }

        /*
         * Update the freehint
         */
        vm_map_freehint_update(map, start, length, align);

        /*
         * Grow the kernel_map if necessary.  pmap_growkernel() will panic
         * if it fails.  The kernel_map is locked and nothing can steal
         * our address space if pmap_growkernel() blocks.
         *
         * NOTE: This may be unconditionally called for kldload areas on
         *	 x86_64 because these do not bump kernel_vm_end (which would
         *	 fill 128G worth of page tables!).  Therefore we must not
         *	 retry.
         */
        if (map == &kernel_map) {
                vm_offset_t kstop;

                kstop = round_page(start + length);
                if (kstop > kernel_vm_end)
                        pmap_growkernel(start, kstop);
        }
        *addr = start;
        return (0);
}
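/*
 * Illustrative arithmetic for the alignment test in vm_map_findspace()
 * above (values are examples only): for a power-of-2 align such as 16,
 * (align | (align - 1)) + 1 == 32 == (align << 1), so the cheap mask path
 * is taken and start is rounded with (start + 15) & ~15.  For align == 24
 * the identity fails (32 != 48), align_mask is set to -1, and roundup()
 * (mod/division) is used instead.
 */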
/*
 * vm_map_find finds an unallocated region in the target address map with
 * the given length and allocates it.  The search is defined to be first-fit
 * from the specified address; the region found is returned in the same
 * parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 *
 * No requirements.  This function will lock the map temporarily.
 */
int
vm_map_find(vm_map_t map, void *map_object, void *map_aux,
            vm_ooffset_t offset, vm_offset_t *addr,
            vm_size_t length, vm_size_t align, boolean_t fitit,
            vm_maptype_t maptype, vm_subsys_t id,
            vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_offset_t start;
        vm_object_t object;
        void *aux_info;
        int result;
        int count;

        /*
         * UKSMAPs set aux_info to the tid of the calling thread.  This is
         * only used by /dev/lpmap (per-thread user/kernel shared page).
         */
        aux_info = NULL;
        if (maptype == VM_MAPTYPE_UKSMAP) {
                object = NULL;
                if (curthread->td_lwp)
                        aux_info = (void *)(intptr_t)curthread->td_lwp->lwp_tid;
        } else {
                object = map_object;
        }

        start = *addr;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (object)
                vm_object_hold_shared(object);
        if (fitit) {
                if (vm_map_findspace(map, start, length, align, 0, addr)) {
                        if (object)
                                vm_object_drop(object);
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_NO_SPACE);
                }
                start = *addr;
        }
        result = vm_map_insert(map, &count,
                               map_object, map_aux,
                               offset, aux_info,
                               start, start + length,
                               maptype, id, prot, max, cow);
        if (object)
                vm_object_drop(object);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        return (result);
}
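/*
 * Illustrative sketch only (hypothetical caller; the id, protection and
 * cow values are examples, error handling elided): creating an anonymous
 * first-fit mapping might look like
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	rv = vm_map_find(map, NULL, NULL, 0, &addr, size,
 *			 PAGE_SIZE, TRUE,
 *			 VM_MAPTYPE_NORMAL, VM_SUBSYS_UNKNOWN,
 *			 VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		return (ENOMEM);
 */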
/*
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.  No action is taken on entries which have their
 * in-transition flag set.
 *
 * The map must be exclusively locked.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                ++mycpu->gd_cnt.v_intrans_coll;
                return;
        }

        if (entry->maptype == VM_MAPTYPE_SUBMAP)
                return;
        if (entry->maptype == VM_MAPTYPE_UKSMAP)
                return;

        prev = vm_map_rb_tree_RB_PREV(entry);
        if (prev) {
                prevsize = prev->ba.end - prev->ba.start;
                if ( (prev->ba.end == entry->ba.start) &&
                     (prev->maptype == entry->maptype) &&
                     (prev->ba.object == entry->ba.object) &&
                     (prev->ba.backing_ba == entry->ba.backing_ba) &&
                     (!prev->ba.object ||
                        (prev->ba.offset + prevsize == entry->ba.offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->id == entry->id) &&
                     (prev->wired_count == entry->wired_count)) {
                        /*
                         * NOTE: order important.  Unlink before gumming up
                         *	 the RBTREE w/adjust, adjust before disposal
                         *	 of prior entry, to avoid pmap snafus.
                         */
                        vm_map_entry_unlink(map, prev);
                        vm_map_backing_adjust_start(entry, prev->ba.start);
                        if (entry->ba.object == NULL)
                                entry->ba.offset = 0;
                        vm_map_entry_dispose(map, prev, countp);
                }
        }

        next = vm_map_rb_tree_RB_NEXT(entry);
        if (next) {
                esize = entry->ba.end - entry->ba.start;
                if ((entry->ba.end == next->ba.start) &&
                    (next->maptype == entry->maptype) &&
                    (next->ba.object == entry->ba.object) &&
                    (next->ba.backing_ba == entry->ba.backing_ba) &&
                     (!entry->ba.object ||
                        (entry->ba.offset + esize == next->ba.offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->id == entry->id) &&
                    (next->wired_count == entry->wired_count)) {
                        /*
                         * NOTE: order important.  Unlink before gumming up
                         *	 the RBTREE w/adjust, adjust before disposal
                         *	 of prior entry, to avoid pmap snafus.
                         */
                        vm_map_entry_unlink(map, next);
                        vm_map_backing_adjust_end(entry, next->ba.end);
                        vm_map_entry_dispose(map, next, countp);
                }
        }
}

/*
 * Asserts that the given entry begins at or after the specified address.
 * If necessary, it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp)		\
{									\
	if (startaddr > entry->ba.start)				\
		_vm_map_clip_start(map, entry, startaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
                   int *countp)
{
        vm_map_entry_t new_entry;

        /*
         * Split off the front portion -- note that we must insert the new
         * entry BEFORE this one, so that this entry has the specified
         * starting address.
         */

        vm_map_simplify_entry(map, entry, countp);

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->ba.object == NULL && !map->system_map &&
            VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
                vm_map_entry_allocate_object(entry);
        }

        /*
         * NOTE: The replicated function will adjust start, end, and offset
         *	 for the remainder of the backing_ba linkages.  We must fixup
         *	 the embedded ba.
         */
        new_entry = vm_map_entry_create(countp);
        *new_entry = *entry;
        new_entry->ba.end = start;

        /*
         * Ordering is important, make sure the new entry is replicated
         * before we cut the existing entry.
         */
        vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
        vm_map_backing_adjust_start(entry, start);
        vm_map_entry_link(map, new_entry);
}
/*
 * Asserts that the given entry ends at or before the specified address.
 * If necessary, it splits the entry into two.
 *
 * The map must be exclusively locked.
 */
#define vm_map_clip_end(map, entry, endaddr, countp)		\
{								\
	if (endaddr < entry->ba.end)				\
		_vm_map_clip_end(map, entry, endaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
                 int *countp)
{
        vm_map_entry_t new_entry;

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */

        if (entry->ba.object == NULL && !map->system_map &&
            VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
                vm_map_entry_allocate_object(entry);
        }

        /*
         * Create a new entry and insert it AFTER the specified entry
         *
         * NOTE: The replicated function will adjust start, end, and offset
         *	 for the remainder of the backing_ba linkages.  We must fixup
         *	 the embedded ba.
         */
        new_entry = vm_map_entry_create(countp);
        *new_entry = *entry;
        new_entry->ba.start = end;
        new_entry->ba.offset += (new_entry->ba.start - entry->ba.start);

        /*
         * Ordering is important, make sure the new entry is replicated
         * before we cut the existing entry.
         */
        vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
        vm_map_backing_adjust_end(entry, end);
        vm_map_entry_link(map, new_entry);
}

/*
 * Asserts that the starting and ending region addresses fall within the
 * valid range for the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)	\
{						\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
}

/*
 * Used to block when an in-transition collision occurs.  The map
 * is unlocked for the sleep and relocked before the return.
 */
void
vm_map_transition_wait(vm_map_t map, int relock)
{
        tsleep_interlock(map, 0);
        vm_map_unlock(map);
        tsleep(map, PINTERLOCKED, "vment", 0);
        if (relock)
                vm_map_lock(map);
}
1865 */ 1866 void 1867 vm_map_transition_wait(vm_map_t map, int relock) 1868 { 1869 tsleep_interlock(map, 0); 1870 vm_map_unlock(map); 1871 tsleep(map, PINTERLOCKED, "vment", 0); 1872 if (relock) 1873 vm_map_lock(map); 1874 } 1875 1876 /* 1877 * When we do blocking operations with the map lock held it is 1878 * possible that a clip might have occurred on our in-transit entry, 1879 * requiring an adjustment to the entry in our loop. These macros 1880 * help the pageable and clip_range code deal with the case. The 1881 * conditional costs virtually nothing if no clipping has occurred. 1882 */ 1883 1884 #define CLIP_CHECK_BACK(entry, save_start) \ 1885 do { \ 1886 while (entry->ba.start != save_start) { \ 1887 entry = vm_map_rb_tree_RB_PREV(entry); \ 1888 KASSERT(entry, ("bad entry clip")); \ 1889 } \ 1890 } while(0) 1891 1892 #define CLIP_CHECK_FWD(entry, save_end) \ 1893 do { \ 1894 while (entry->ba.end != save_end) { \ 1895 entry = vm_map_rb_tree_RB_NEXT(entry); \ 1896 KASSERT(entry, ("bad entry clip")); \ 1897 } \ 1898 } while(0) 1899 1900 1901 /* 1902 * Clip the specified range and return the base entry. The 1903 * range may cover several entries starting at the returned base 1904 * and the first and last entry in the covering sequence will be 1905 * properly clipped to the requested start and end address. 1906 * 1907 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES 1908 * flag. 1909 * 1910 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries 1911 * covered by the requested range. 1912 * 1913 * The map must be exclusively locked on entry and will remain locked 1914 * on return. If no range exists or the range contains holes and you 1915 * specified that no holes were allowed, NULL will be returned. This 1916 * routine may temporarily unlock the map in order to avoid a deadlock when 1917 * sleeping. 1918 */ 1919 static 1920 vm_map_entry_t 1921 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 1922 int *countp, int flags) 1923 { 1924 vm_map_entry_t start_entry; 1925 vm_map_entry_t entry; 1926 vm_map_entry_t next; 1927 1928 /* 1929 * Locate the entry and effect initial clipping. The in-transition 1930 * case does not occur very often so do not try to optimize it. 1931 */ 1932 again: 1933 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) 1934 return (NULL); 1935 entry = start_entry; 1936 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1937 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1938 ++mycpu->gd_cnt.v_intrans_coll; 1939 ++mycpu->gd_cnt.v_intrans_wait; 1940 vm_map_transition_wait(map, 1); 1941 /* 1942 * entry and/or start_entry may have been clipped while 1943 * we slept, or may have gone away entirely. We have 1944 * to restart from the lookup. 1945 */ 1946 goto again; 1947 } 1948 1949 /* 1950 * Since we hold an exclusive map lock we do not have to restart 1951 * after clipping, even though clipping may block in zalloc. 1952 */ 1953 vm_map_clip_start(map, entry, start, countp); 1954 vm_map_clip_end(map, entry, end, countp); 1955 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1956 1957 /* 1958 * Scan entries covered by the range. When working on the next 1959 * entry a restart need only re-loop on the current entry which 1960 * we have already locked, since 'next' may have changed. Also, 1961 * even though entry is safe, it may have been clipped so we 1962 * have to iterate forwards through the clip after sleeping.
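 *
 * The resynchronization idiom used by the loop below, summarized:
 * capture the entry's end before any potential sleep, then walk back
 * onto the proper fragments afterwards.
 *
 *	save_end = entry->ba.end;
 *	vm_map_transition_wait(map, 1);
 *	CLIP_CHECK_FWD(entry, save_end);
 *	CLIP_CHECK_BACK(start_entry, start);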
1963 */ 1964 for (;;) { 1965 next = vm_map_rb_tree_RB_NEXT(entry); 1966 if (next == NULL || next->ba.start >= end) 1967 break; 1968 if (flags & MAP_CLIP_NO_HOLES) { 1969 if (next->ba.start > entry->ba.end) { 1970 vm_map_unclip_range(map, start_entry, 1971 start, entry->ba.end, countp, flags); 1972 return(NULL); 1973 } 1974 } 1975 1976 if (next->eflags & MAP_ENTRY_IN_TRANSITION) { 1977 vm_offset_t save_end = entry->ba.end; 1978 next->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1979 ++mycpu->gd_cnt.v_intrans_coll; 1980 ++mycpu->gd_cnt.v_intrans_wait; 1981 vm_map_transition_wait(map, 1); 1982 1983 /* 1984 * clips might have occured while we blocked. 1985 */ 1986 CLIP_CHECK_FWD(entry, save_end); 1987 CLIP_CHECK_BACK(start_entry, start); 1988 continue; 1989 } 1990 1991 /* 1992 * No restart necessary even though clip_end may block, we 1993 * are holding the map lock. 1994 */ 1995 vm_map_clip_end(map, next, end, countp); 1996 next->eflags |= MAP_ENTRY_IN_TRANSITION; 1997 entry = next; 1998 } 1999 if (flags & MAP_CLIP_NO_HOLES) { 2000 if (entry->ba.end != end) { 2001 vm_map_unclip_range(map, start_entry, 2002 start, entry->ba.end, countp, flags); 2003 return(NULL); 2004 } 2005 } 2006 return(start_entry); 2007 } 2008 2009 /* 2010 * Undo the effect of vm_map_clip_range(). You should pass the same 2011 * flags and the same range that you passed to vm_map_clip_range(). 2012 * This code will clear the in-transition flag on the entries and 2013 * wake up anyone waiting. This code will also simplify the sequence 2014 * and attempt to merge it with entries before and after the sequence. 2015 * 2016 * The map must be locked on entry and will remain locked on return. 2017 * 2018 * Note that you should also pass the start_entry returned by 2019 * vm_map_clip_range(). However, if you block between the two calls 2020 * with the map unlocked please be aware that the start_entry may 2021 * have been clipped and you may need to scan it backwards to find 2022 * the entry corresponding with the original start address. You are 2023 * responsible for this, vm_map_unclip_range() expects the correct 2024 * start_entry to be passed to it and will KASSERT otherwise. 2025 */ 2026 static 2027 void 2028 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry, 2029 vm_offset_t start, vm_offset_t end, 2030 int *countp, int flags) 2031 { 2032 vm_map_entry_t entry; 2033 2034 entry = start_entry; 2035 2036 KASSERT(entry->ba.start == start, ("unclip_range: illegal base entry")); 2037 while (entry && entry->ba.start < end) { 2038 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2039 ("in-transition flag not set during unclip on: %p", 2040 entry)); 2041 KASSERT(entry->ba.end <= end, 2042 ("unclip_range: tail wasn't clipped")); 2043 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2044 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2045 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2046 wakeup(map); 2047 } 2048 entry = vm_map_rb_tree_RB_NEXT(entry); 2049 } 2050 2051 /* 2052 * Simplification does not block so there is no restart case. 2053 */ 2054 entry = start_entry; 2055 while (entry && entry->ba.start < end) { 2056 vm_map_simplify_entry(map, entry, countp); 2057 entry = vm_map_rb_tree_RB_NEXT(entry); 2058 } 2059 } 2060 2061 /* 2062 * Mark the given range as handled by a subordinate map. 2063 * 2064 * This range must have been created with vm_map_find(), and no other 2065 * operations may have been performed on this range prior to calling 2066 * vm_map_submap(). 2067 * 2068 * Submappings cannot be removed. 2069 * 2070 * No requirements. 
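 *
 * Hypothetical usage sketch (the vm_map_find() arguments are elided
 * here, see that function for the real signature): reserve
 * [base, base + size) in the parent map with vm_map_find(), perform
 * no other operations on the range, then install the submap:
 *
 *	result = vm_map_submap(parent_map, base, base + size, submap);
 *	if (result != KERN_SUCCESS)
 *		(the range was not a single clean, object-less entry)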
2071 */ 2072 int 2073 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) 2074 { 2075 vm_map_entry_t entry; 2076 int result = KERN_INVALID_ARGUMENT; 2077 int count; 2078 2079 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2080 vm_map_lock(map); 2081 2082 VM_MAP_RANGE_CHECK(map, start, end); 2083 2084 if (vm_map_lookup_entry(map, start, &entry)) { 2085 vm_map_clip_start(map, entry, start, &count); 2086 } else if (entry) { 2087 entry = vm_map_rb_tree_RB_NEXT(entry); 2088 } else { 2089 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2090 } 2091 2092 vm_map_clip_end(map, entry, end, &count); 2093 2094 if ((entry->ba.start == start) && (entry->ba.end == end) && 2095 ((entry->eflags & MAP_ENTRY_COW) == 0) && 2096 (entry->ba.object == NULL)) { 2097 entry->ba.sub_map = submap; 2098 entry->maptype = VM_MAPTYPE_SUBMAP; 2099 result = KERN_SUCCESS; 2100 } 2101 vm_map_unlock(map); 2102 vm_map_entry_release(count); 2103 2104 return (result); 2105 } 2106 2107 /* 2108 * Sets the protection of the specified address region in the target map. 2109 * If "set_max" is specified, the maximum protection is to be set; 2110 * otherwise, only the current protection is affected. 2111 * 2112 * The protection is not applicable to submaps, but is applicable to normal 2113 * maps and maps governed by virtual page tables. For example, when operating 2114 * on a virtual page table our protection basically controls how COW occurs 2115 * on the backing object, whereas the virtual page table abstraction itself 2116 * is an abstraction for userland. 2117 * 2118 * No requirements. 2119 */ 2120 int 2121 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2122 vm_prot_t new_prot, boolean_t set_max) 2123 { 2124 vm_map_entry_t current; 2125 vm_map_entry_t entry; 2126 int count; 2127 2128 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2129 vm_map_lock(map); 2130 2131 VM_MAP_RANGE_CHECK(map, start, end); 2132 2133 if (vm_map_lookup_entry(map, start, &entry)) { 2134 vm_map_clip_start(map, entry, start, &count); 2135 } else if (entry) { 2136 entry = vm_map_rb_tree_RB_NEXT(entry); 2137 } else { 2138 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2139 } 2140 2141 /* 2142 * Make a first pass to check for protection violations. 2143 */ 2144 current = entry; 2145 while (current && current->ba.start < end) { 2146 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2147 vm_map_unlock(map); 2148 vm_map_entry_release(count); 2149 return (KERN_INVALID_ARGUMENT); 2150 } 2151 if ((new_prot & current->max_protection) != new_prot) { 2152 vm_map_unlock(map); 2153 vm_map_entry_release(count); 2154 return (KERN_PROTECTION_FAILURE); 2155 } 2156 2157 /* 2158 * When making a SHARED+RW file mmap writable, update 2159 * v_lastwrite_ts. 2160 */ 2161 if (new_prot & PROT_WRITE && 2162 (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 2163 (current->maptype == VM_MAPTYPE_NORMAL || 2164 current->maptype == VM_MAPTYPE_VPAGETABLE) && 2165 current->ba.object && 2166 current->ba.object->type == OBJT_VNODE) { 2167 struct vnode *vp; 2168 2169 vp = current->ba.object->handle; 2170 if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT) == 0) { 2171 vfs_timestamp(&vp->v_lastwrite_ts); 2172 vsetflags(vp, VLASTWRITETS); 2173 vn_unlock(vp); 2174 } 2175 } 2176 current = vm_map_rb_tree_RB_NEXT(current); 2177 } 2178 2179 /* 2180 * Go back and fix up protections. [Note that clipping is not 2181 * necessary the second time.] 
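 *
 * Note that for a MAP_ENTRY_COW entry the MASK() macro below withholds
 * VM_PROT_WRITE from the pmap even when the entry's new protection
 * allows writing, e.g. a read/write COW entry is entered into the pmap
 * read-only so that the first write still faults and performs the copy.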
2182 */ 2183 current = entry; 2184 2185 while (current && current->ba.start < end) { 2186 vm_prot_t old_prot; 2187 2188 vm_map_clip_end(map, current, end, &count); 2189 2190 old_prot = current->protection; 2191 if (set_max) { 2192 current->max_protection = new_prot; 2193 current->protection = new_prot & old_prot; 2194 } else { 2195 current->protection = new_prot; 2196 } 2197 2198 /* 2199 * Update physical map if necessary. Worry about copy-on-write 2200 * here -- CHECK THIS XXX 2201 */ 2202 if (current->protection != old_prot) { 2203 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2204 VM_PROT_ALL) 2205 2206 pmap_protect(map->pmap, current->ba.start, 2207 current->ba.end, 2208 current->protection & MASK(current)); 2209 #undef MASK 2210 } 2211 2212 vm_map_simplify_entry(map, current, &count); 2213 2214 current = vm_map_rb_tree_RB_NEXT(current); 2215 } 2216 vm_map_unlock(map); 2217 vm_map_entry_release(count); 2218 return (KERN_SUCCESS); 2219 } 2220 2221 /* 2222 * This routine traverses a process's map handling the madvise 2223 * system call. Advisories are classified as either those affecting 2224 * the vm_map_entry structure, or those affecting the underlying 2225 * objects. 2226 * 2227 * The <value> argument is used for extended madvise calls. 2228 * 2229 * No requirements. 2230 */ 2231 int 2232 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, 2233 int behav, off_t value) 2234 { 2235 vm_map_entry_t current, entry; 2236 int modify_map = 0; 2237 int error = 0; 2238 int count; 2239 2240 /* 2241 * Some madvise calls directly modify the vm_map_entry, in which case 2242 * we need to use an exclusive lock on the map and we need to perform 2243 * various clipping operations. Otherwise we only need a read-lock 2244 * on the map. 2245 */ 2246 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2247 2248 switch(behav) { 2249 case MADV_NORMAL: 2250 case MADV_SEQUENTIAL: 2251 case MADV_RANDOM: 2252 case MADV_NOSYNC: 2253 case MADV_AUTOSYNC: 2254 case MADV_NOCORE: 2255 case MADV_CORE: 2256 case MADV_SETMAP: 2257 modify_map = 1; 2258 vm_map_lock(map); 2259 break; 2260 case MADV_INVAL: 2261 case MADV_WILLNEED: 2262 case MADV_DONTNEED: 2263 case MADV_FREE: 2264 vm_map_lock_read(map); 2265 break; 2266 default: 2267 vm_map_entry_release(count); 2268 return (EINVAL); 2269 } 2270 2271 /* 2272 * Locate starting entry and clip if necessary. 2273 */ 2274 2275 VM_MAP_RANGE_CHECK(map, start, end); 2276 2277 if (vm_map_lookup_entry(map, start, &entry)) { 2278 if (modify_map) 2279 vm_map_clip_start(map, entry, start, &count); 2280 } else if (entry) { 2281 entry = vm_map_rb_tree_RB_NEXT(entry); 2282 } else { 2283 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2284 } 2285 2286 if (modify_map) { 2287 /* 2288 * madvise behaviors that are implemented in the vm_map_entry. 2289 * 2290 * We clip the vm_map_entry so that behavioral changes are 2291 * limited to the specified address range.
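 *
 * For example (a hedged sketch of the usual userland path), a
 * madvise(addr, len, MADV_NOSYNC) call arrives here with behav set
 * to MADV_NOSYNC and simply sets MAP_ENTRY_NOSYNC on each clipped
 * entry covering [addr, addr + len); MADV_CORE and MADV_NOCORE
 * toggle MAP_ENTRY_NOCOREDUMP the same way.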
2292 */ 2293 for (current = entry; 2294 current && current->ba.start < end; 2295 current = vm_map_rb_tree_RB_NEXT(current)) { 2296 /* 2297 * Ignore submaps 2298 */ 2299 if (current->maptype == VM_MAPTYPE_SUBMAP) 2300 continue; 2301 2302 vm_map_clip_end(map, current, end, &count); 2303 2304 switch (behav) { 2305 case MADV_NORMAL: 2306 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2307 break; 2308 case MADV_SEQUENTIAL: 2309 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2310 break; 2311 case MADV_RANDOM: 2312 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2313 break; 2314 case MADV_NOSYNC: 2315 current->eflags |= MAP_ENTRY_NOSYNC; 2316 break; 2317 case MADV_AUTOSYNC: 2318 current->eflags &= ~MAP_ENTRY_NOSYNC; 2319 break; 2320 case MADV_NOCORE: 2321 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2322 break; 2323 case MADV_CORE: 2324 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2325 break; 2326 case MADV_SETMAP: 2327 /* 2328 * Set the page directory page for a map 2329 * governed by a virtual page table. Mark 2330 * the entry as being governed by a virtual 2331 * page table if it is not. 2332 * 2333 * XXX the page directory page is stored 2334 * in the avail_ssize field if the map_entry. 2335 * 2336 * XXX the map simplification code does not 2337 * compare this field so weird things may 2338 * happen if you do not apply this function 2339 * to the entire mapping governed by the 2340 * virtual page table. 2341 */ 2342 if (current->maptype != VM_MAPTYPE_VPAGETABLE) { 2343 error = EINVAL; 2344 break; 2345 } 2346 current->aux.master_pde = value; 2347 pmap_remove(map->pmap, 2348 current->ba.start, current->ba.end); 2349 break; 2350 case MADV_INVAL: 2351 /* 2352 * Invalidate the related pmap entries, used 2353 * to flush portions of the real kernel's 2354 * pmap when the caller has removed or 2355 * modified existing mappings in a virtual 2356 * page table. 2357 * 2358 * (exclusive locked map version does not 2359 * need the range interlock). 2360 */ 2361 pmap_remove(map->pmap, 2362 current->ba.start, current->ba.end); 2363 break; 2364 default: 2365 error = EINVAL; 2366 break; 2367 } 2368 vm_map_simplify_entry(map, current, &count); 2369 } 2370 vm_map_unlock(map); 2371 } else { 2372 vm_pindex_t pindex; 2373 vm_pindex_t delta; 2374 2375 /* 2376 * madvise behaviors that are implemented in the underlying 2377 * vm_object. 2378 * 2379 * Since we don't clip the vm_map_entry, we have to clip 2380 * the vm_object pindex and count. 2381 * 2382 * NOTE! These functions are only supported on normal maps, 2383 * except MADV_INVAL which is also supported on 2384 * virtual page tables. 2385 * 2386 * NOTE! These functions only apply to the top-most object. 2387 * It is not applicable to backing objects. 
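 *
 * A worked example of the clipping arithmetic below, assuming
 * 4K pages: for an entry covering [0x10000, 0x20000) with
 * ba.offset 0 and a request for [0x14000, 0x18000), pindex starts
 * at 0 and delta at 16 pages; the front adjustment adds 4 to
 * pindex and subtracts 4 from delta, the tail adjustment subtracts
 * another 8, leaving object pages [4, 8) to be operated on.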
2388 */ 2389 for (current = entry; 2390 current && current->ba.start < end; 2391 current = vm_map_rb_tree_RB_NEXT(current)) { 2392 vm_offset_t useStart; 2393 2394 if (current->maptype != VM_MAPTYPE_NORMAL && 2395 (current->maptype != VM_MAPTYPE_VPAGETABLE || 2396 behav != MADV_INVAL)) { 2397 continue; 2398 } 2399 2400 pindex = OFF_TO_IDX(current->ba.offset); 2401 delta = atop(current->ba.end - current->ba.start); 2402 useStart = current->ba.start; 2403 2404 if (current->ba.start < start) { 2405 pindex += atop(start - current->ba.start); 2406 delta -= atop(start - current->ba.start); 2407 useStart = start; 2408 } 2409 if (current->ba.end > end) 2410 delta -= atop(current->ba.end - end); 2411 2412 if ((vm_spindex_t)delta <= 0) 2413 continue; 2414 2415 if (behav == MADV_INVAL) { 2416 /* 2417 * Invalidate the related pmap entries, used 2418 * to flush portions of the real kernel's 2419 * pmap when the caller has removed or 2420 * modified existing mappings in a virtual 2421 * page table. 2422 * 2423 * (shared locked map version needs the 2424 * interlock, see vm_fault()). 2425 */ 2426 struct vm_map_ilock ilock; 2427 2428 KASSERT(useStart >= VM_MIN_USER_ADDRESS && 2429 useStart + ptoa(delta) <= 2430 VM_MAX_USER_ADDRESS, 2431 ("Bad range %016jx-%016jx (%016jx)", 2432 useStart, useStart + ptoa(delta), 2433 delta)); 2434 vm_map_interlock(map, &ilock, 2435 useStart, 2436 useStart + ptoa(delta)); 2437 pmap_remove(map->pmap, 2438 useStart, 2439 useStart + ptoa(delta)); 2440 vm_map_deinterlock(map, &ilock); 2441 } else { 2442 vm_object_madvise(current->ba.object, 2443 pindex, delta, behav); 2444 } 2445 2446 /* 2447 * Try to populate the page table. Mappings governed 2448 * by virtual page tables cannot be pre-populated 2449 * without a lot of work so don't try. 2450 */ 2451 if (behav == MADV_WILLNEED && 2452 current->maptype != VM_MAPTYPE_VPAGETABLE) { 2453 pmap_object_init_pt( 2454 map->pmap, current, 2455 useStart, 2456 (delta << PAGE_SHIFT), 2457 MAP_PREFAULT_MADVISE 2458 ); 2459 } 2460 } 2461 vm_map_unlock_read(map); 2462 } 2463 vm_map_entry_release(count); 2464 return(error); 2465 } 2466 2467 2468 /* 2469 * Sets the inheritance of the specified address range in the target map. 2470 * Inheritance affects how the map will be shared with child maps at the 2471 * time of vm_map_fork. 
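 *
 * Hedged usage sketch (mirrors what a minherit(2)-style caller would
 * do; the surrounding syscall plumbing is assumed):
 *
 *	rv = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + len,
 *			    VM_INHERIT_NONE);
 *
 * A later vmspace_fork() then skips the range entirely, so the child
 * starts out with a hole there.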
2472 */ 2473 int 2474 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2475 vm_inherit_t new_inheritance) 2476 { 2477 vm_map_entry_t entry; 2478 vm_map_entry_t temp_entry; 2479 int count; 2480 2481 switch (new_inheritance) { 2482 case VM_INHERIT_NONE: 2483 case VM_INHERIT_COPY: 2484 case VM_INHERIT_SHARE: 2485 break; 2486 default: 2487 return (KERN_INVALID_ARGUMENT); 2488 } 2489 2490 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2491 vm_map_lock(map); 2492 2493 VM_MAP_RANGE_CHECK(map, start, end); 2494 2495 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2496 entry = temp_entry; 2497 vm_map_clip_start(map, entry, start, &count); 2498 } else if (temp_entry) { 2499 entry = vm_map_rb_tree_RB_NEXT(temp_entry); 2500 } else { 2501 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2502 } 2503 2504 while (entry && entry->ba.start < end) { 2505 vm_map_clip_end(map, entry, end, &count); 2506 2507 entry->inheritance = new_inheritance; 2508 2509 vm_map_simplify_entry(map, entry, &count); 2510 2511 entry = vm_map_rb_tree_RB_NEXT(entry); 2512 } 2513 vm_map_unlock(map); 2514 vm_map_entry_release(count); 2515 return (KERN_SUCCESS); 2516 } 2517 2518 /* 2519 * Implement the semantics of mlock 2520 */ 2521 int 2522 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, 2523 boolean_t new_pageable) 2524 { 2525 vm_map_entry_t entry; 2526 vm_map_entry_t start_entry; 2527 vm_offset_t end; 2528 int rv = KERN_SUCCESS; 2529 int count; 2530 2531 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2532 vm_map_lock(map); 2533 VM_MAP_RANGE_CHECK(map, start, real_end); 2534 end = real_end; 2535 2536 start_entry = vm_map_clip_range(map, start, end, &count, 2537 MAP_CLIP_NO_HOLES); 2538 if (start_entry == NULL) { 2539 vm_map_unlock(map); 2540 vm_map_entry_release(count); 2541 return (KERN_INVALID_ADDRESS); 2542 } 2543 2544 if (new_pageable == 0) { 2545 entry = start_entry; 2546 while (entry && entry->ba.start < end) { 2547 vm_offset_t save_start; 2548 vm_offset_t save_end; 2549 2550 /* 2551 * Already user wired or hard wired (trivial cases) 2552 */ 2553 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 2554 entry = vm_map_rb_tree_RB_NEXT(entry); 2555 continue; 2556 } 2557 if (entry->wired_count != 0) { 2558 entry->wired_count++; 2559 entry->eflags |= MAP_ENTRY_USER_WIRED; 2560 entry = vm_map_rb_tree_RB_NEXT(entry); 2561 continue; 2562 } 2563 2564 /* 2565 * A new wiring requires instantiation of appropriate 2566 * management structures and the faulting in of the 2567 * page. 2568 */ 2569 if (entry->maptype == VM_MAPTYPE_NORMAL || 2570 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2571 int copyflag = entry->eflags & 2572 MAP_ENTRY_NEEDS_COPY; 2573 if (copyflag && ((entry->protection & 2574 VM_PROT_WRITE) != 0)) { 2575 vm_map_entry_shadow(entry); 2576 } else if (entry->ba.object == NULL && 2577 !map->system_map) { 2578 vm_map_entry_allocate_object(entry); 2579 } 2580 } 2581 entry->wired_count++; 2582 entry->eflags |= MAP_ENTRY_USER_WIRED; 2583 2584 /* 2585 * Now fault in the area. Note that vm_fault_wire() 2586 * may release the map lock temporarily, it will be 2587 * relocked on return. The in-transition 2588 * flag protects the entries. 
2589 */ 2590 save_start = entry->ba.start; 2591 save_end = entry->ba.end; 2592 rv = vm_fault_wire(map, entry, TRUE, 0); 2593 if (rv) { 2594 CLIP_CHECK_BACK(entry, save_start); 2595 for (;;) { 2596 KASSERT(entry->wired_count == 1, ("bad wired_count on entry")); 2597 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2598 entry->wired_count = 0; 2599 if (entry->ba.end == save_end) 2600 break; 2601 entry = vm_map_rb_tree_RB_NEXT(entry); 2602 KASSERT(entry, 2603 ("bad entry clip during backout")); 2604 } 2605 end = save_start; /* unwire the rest */ 2606 break; 2607 } 2608 /* 2609 * note that even though the entry might have been 2610 * clipped, the USER_WIRED flag we set prevents 2611 * duplication so we do not have to do a 2612 * clip check. 2613 */ 2614 entry = vm_map_rb_tree_RB_NEXT(entry); 2615 } 2616 2617 /* 2618 * If we failed, fall through to the unwiring section to 2619 * unwire what we had wired so far. 'end' has already 2620 * been adjusted. 2621 */ 2622 if (rv) 2623 new_pageable = 1; 2624 2625 /* 2626 * start_entry might have been clipped if we unlocked the 2627 * map and blocked. No matter how clipped it has gotten 2628 * there should be a fragment that is on our start boundary. 2629 */ 2630 CLIP_CHECK_BACK(start_entry, start); 2631 } 2632 2633 /* 2634 * Deal with the unwiring case. 2635 */ 2636 if (new_pageable) { 2637 /* 2638 * This is the unwiring case. We must first ensure that the 2639 * range to be unwired is really wired down. We know there 2640 * are no holes. 2641 */ 2642 entry = start_entry; 2643 while (entry && entry->ba.start < end) { 2644 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2645 rv = KERN_INVALID_ARGUMENT; 2646 goto done; 2647 } 2648 KASSERT(entry->wired_count != 0, 2649 ("wired count was 0 with USER_WIRED set! %p", 2650 entry)); 2651 entry = vm_map_rb_tree_RB_NEXT(entry); 2652 } 2653 2654 /* 2655 * Now decrement the wiring count for each region. If a region 2656 * becomes completely unwired, unwire its physical pages and 2657 * mappings. 2658 */ 2659 /* 2660 * The validation loop above leaves "entry" pointing beyond the 2661 * range being unwired, so the unwiring loop below must restart 2662 * from start_entry rather than continuing with the stale loop 2663 * variable. If it did not, the loop would never be entered and 2664 * the pages backing the entries would never be unwired, leaking 2665 * wired pages. Note that vm_fault_unwire() is only called once 2666 * an entry's wired_count drops to zero, so entries still 2667 * hard-wired by other means keep their pages wired. 2668 */ 2669 entry = start_entry; 2670 while (entry && entry->ba.start < end) { 2671 KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, 2672 ("expected USER_WIRED on entry %p", entry)); 2673 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2674 entry->wired_count--; 2675 if (entry->wired_count == 0) 2676 vm_fault_unwire(map, entry); 2677 entry = vm_map_rb_tree_RB_NEXT(entry); 2678 } 2679 } 2680 done: 2681 vm_map_unclip_range(map, start_entry, start, real_end, &count, 2682 MAP_CLIP_NO_HOLES); 2683 vm_map_unlock(map); 2684 vm_map_entry_release(count); 2685 2686 return (rv); 2687 } 2688 2689 /* 2690 * Sets the pageability of the specified address range in the target map. 2691 * Regions specified as not pageable require locked-down physical 2692 * memory and physical page maps. 2693 * 2694 * The map must not be locked, but a reference must remain to the map 2695 * throughout the call. 2696 * 2697 * This function may be called via the zalloc path and must properly 2698 * reserve map entries for kernel_map. 2699 * 2700 * No requirements.
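 *
 * Hedged usage sketch for a kernel submap ("map", "addr" and "size"
 * are placeholders, the real callers live elsewhere):
 *
 *	rv = vm_map_wire(map, addr, addr + size, KM_KRESERVE);
 *	...
 *	rv = vm_map_wire(map, addr, addr + size,
 *			 KM_KRESERVE | KM_PAGEABLE);	(unwire)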
2701 */ 2702 int 2703 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags) 2704 { 2705 vm_map_entry_t entry; 2706 vm_map_entry_t start_entry; 2707 vm_offset_t end; 2708 int rv = KERN_SUCCESS; 2709 int count; 2710 2711 if (kmflags & KM_KRESERVE) 2712 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 2713 else 2714 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2715 vm_map_lock(map); 2716 VM_MAP_RANGE_CHECK(map, start, real_end); 2717 end = real_end; 2718 2719 start_entry = vm_map_clip_range(map, start, end, &count, 2720 MAP_CLIP_NO_HOLES); 2721 if (start_entry == NULL) { 2722 vm_map_unlock(map); 2723 rv = KERN_INVALID_ADDRESS; 2724 goto failure; 2725 } 2726 if ((kmflags & KM_PAGEABLE) == 0) { 2727 /* 2728 * Wiring. 2729 * 2730 * 1. Holding the write lock, we create any shadow or zero-fill 2731 * objects that need to be created. Then we clip each map 2732 * entry to the region to be wired and increment its wiring 2733 * count. We create objects before clipping the map entries 2734 * to avoid object proliferation. 2735 * 2736 * 2. We downgrade to a read lock, and call vm_fault_wire to 2737 * fault in the pages for any newly wired area (wired_count is 2738 * 1). 2739 * 2740 * Downgrading to a read lock for vm_fault_wire avoids a 2741 * possible deadlock with another process that may have faulted 2742 * on one of the pages to be wired (it would mark the page busy, 2743 * blocking us, then in turn block on the map lock that we 2744 * hold). Because of problems in the recursive lock package, 2745 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 2746 * any actions that require the write lock must be done 2747 * beforehand. Because we keep the read lock on the map, the 2748 * copy-on-write status of the entries we modify here cannot 2749 * change. 2750 */ 2751 entry = start_entry; 2752 while (entry && entry->ba.start < end) { 2753 /* 2754 * Trivial case if the entry is already wired 2755 */ 2756 if (entry->wired_count) { 2757 entry->wired_count++; 2758 entry = vm_map_rb_tree_RB_NEXT(entry); 2759 continue; 2760 } 2761 2762 /* 2763 * The entry is being newly wired, we have to set up 2764 * appropriate management structures. A shadow 2765 * object is required for a copy-on-write region, 2766 * or a normal object for a zero-fill region. We 2767 * do not have to do this for entries that point to sub 2768 * maps because we won't hold the lock on the sub map. 2769 */ 2770 if (entry->maptype == VM_MAPTYPE_NORMAL || 2771 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2772 int copyflag = entry->eflags & 2773 MAP_ENTRY_NEEDS_COPY; 2774 if (copyflag && ((entry->protection & 2775 VM_PROT_WRITE) != 0)) { 2776 vm_map_entry_shadow(entry); 2777 } else if (entry->ba.object == NULL && 2778 !map->system_map) { 2779 vm_map_entry_allocate_object(entry); 2780 } 2781 } 2782 entry->wired_count++; 2783 entry = vm_map_rb_tree_RB_NEXT(entry); 2784 } 2785 2786 /* 2787 * Pass 2. 2788 */ 2789 2790 /* 2791 * HACK HACK HACK HACK 2792 * 2793 * vm_fault_wire() temporarily unlocks the map to avoid 2794 * deadlocks. The in-transition flag from the vm_map_clip_range 2795 * call should protect us from changes while the map is 2796 * unlocked. 2797 * 2798 * NOTE: Previously this comment stated that clipping might 2799 * still occur while the entry is unlocked, but from 2800 * what I can tell it actually cannot. 2801 * 2802 * It is unclear whether the CLIP_CHECK_*() calls 2803 * are still needed but we keep them in anyway.
2804 * 2805 * HACK HACK HACK HACK 2806 */ 2807 2808 entry = start_entry; 2809 while (entry && entry->ba.start < end) { 2810 /* 2811 * If vm_fault_wire fails for any page we need to undo 2812 * what has been done. We decrement the wiring count 2813 * for those pages which have not yet been wired (now) 2814 * and unwire those that have (later). 2815 */ 2816 vm_offset_t save_start = entry->ba.start; 2817 vm_offset_t save_end = entry->ba.end; 2818 2819 if (entry->wired_count == 1) 2820 rv = vm_fault_wire(map, entry, FALSE, kmflags); 2821 if (rv) { 2822 CLIP_CHECK_BACK(entry, save_start); 2823 for (;;) { 2824 KASSERT(entry->wired_count == 1, 2825 ("wired_count changed unexpectedly")); 2826 entry->wired_count = 0; 2827 if (entry->ba.end == save_end) 2828 break; 2829 entry = vm_map_rb_tree_RB_NEXT(entry); 2830 KASSERT(entry, 2831 ("bad entry clip during backout")); 2832 } 2833 end = save_start; 2834 break; 2835 } 2836 CLIP_CHECK_FWD(entry, save_end); 2837 entry = vm_map_rb_tree_RB_NEXT(entry); 2838 } 2839 2840 /* 2841 * If a failure occured undo everything by falling through 2842 * to the unwiring code. 'end' has already been adjusted 2843 * appropriately. 2844 */ 2845 if (rv) 2846 kmflags |= KM_PAGEABLE; 2847 2848 /* 2849 * start_entry is still IN_TRANSITION but may have been 2850 * clipped since vm_fault_wire() unlocks and relocks the 2851 * map. No matter how clipped it has gotten there should 2852 * be a fragment that is on our start boundary. 2853 */ 2854 CLIP_CHECK_BACK(start_entry, start); 2855 } 2856 2857 if (kmflags & KM_PAGEABLE) { 2858 /* 2859 * This is the unwiring case. We must first ensure that the 2860 * range to be unwired is really wired down. We know there 2861 * are no holes. 2862 */ 2863 entry = start_entry; 2864 while (entry && entry->ba.start < end) { 2865 if (entry->wired_count == 0) { 2866 rv = KERN_INVALID_ARGUMENT; 2867 goto done; 2868 } 2869 entry = vm_map_rb_tree_RB_NEXT(entry); 2870 } 2871 2872 /* 2873 * Now decrement the wiring count for each region. If a region 2874 * becomes completely unwired, unwire its physical pages and 2875 * mappings. 2876 */ 2877 entry = start_entry; 2878 while (entry && entry->ba.start < end) { 2879 entry->wired_count--; 2880 if (entry->wired_count == 0) 2881 vm_fault_unwire(map, entry); 2882 entry = vm_map_rb_tree_RB_NEXT(entry); 2883 } 2884 } 2885 done: 2886 vm_map_unclip_range(map, start_entry, start, real_end, 2887 &count, MAP_CLIP_NO_HOLES); 2888 vm_map_unlock(map); 2889 failure: 2890 if (kmflags & KM_KRESERVE) 2891 vm_map_entry_krelease(count); 2892 else 2893 vm_map_entry_release(count); 2894 return (rv); 2895 } 2896 2897 /* 2898 * Mark a newly allocated address range as wired but do not fault in 2899 * the pages. The caller is expected to load the pages into the object. 2900 * 2901 * The map must be locked on entry and will remain locked on return. 2902 * No other requirements. 2903 */ 2904 void 2905 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, 2906 int *countp) 2907 { 2908 vm_map_entry_t scan; 2909 vm_map_entry_t entry; 2910 2911 entry = vm_map_clip_range(map, addr, addr + size, 2912 countp, MAP_CLIP_NO_HOLES); 2913 scan = entry; 2914 while (scan && scan->ba.start < addr + size) { 2915 KKASSERT(scan->wired_count == 0); 2916 scan->wired_count = 1; 2917 scan = vm_map_rb_tree_RB_NEXT(scan); 2918 } 2919 vm_map_unclip_range(map, entry, addr, addr + size, 2920 countp, MAP_CLIP_NO_HOLES); 2921 } 2922 2923 /* 2924 * Push any dirty cached pages in the address range to their pager. 
2925 * If syncio is TRUE, dirty pages are written synchronously. 2926 * If invalidate is TRUE, any cached pages are freed as well. 2927 * 2928 * This routine is called by sys_msync() 2929 * 2930 * Returns an error if any part of the specified range is not mapped. 2931 * 2932 * No requirements. 2933 */ 2934 int 2935 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, 2936 boolean_t syncio, boolean_t invalidate) 2937 { 2938 vm_map_entry_t current; 2939 vm_map_entry_t next; 2940 vm_map_entry_t entry; 2941 vm_map_backing_t ba; 2942 vm_size_t size; 2943 vm_object_t object; 2944 vm_ooffset_t offset; 2945 2946 vm_map_lock_read(map); 2947 VM_MAP_RANGE_CHECK(map, start, end); 2948 if (!vm_map_lookup_entry(map, start, &entry)) { 2949 vm_map_unlock_read(map); 2950 return (KERN_INVALID_ADDRESS); 2951 } 2952 lwkt_gettoken(&map->token); 2953 2954 /* 2955 * Make a first pass to check for holes. 2956 */ 2957 current = entry; 2958 while (current && current->ba.start < end) { 2959 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2960 lwkt_reltoken(&map->token); 2961 vm_map_unlock_read(map); 2962 return (KERN_INVALID_ARGUMENT); 2963 } 2964 next = vm_map_rb_tree_RB_NEXT(current); 2965 if (end > current->ba.end && 2966 (next == NULL || 2967 current->ba.end != next->ba.start)) { 2968 lwkt_reltoken(&map->token); 2969 vm_map_unlock_read(map); 2970 return (KERN_INVALID_ADDRESS); 2971 } 2972 current = next; 2973 } 2974 2975 if (invalidate) 2976 pmap_remove(vm_map_pmap(map), start, end); 2977 2978 /* 2979 * Make a second pass, cleaning/uncaching pages from the indicated 2980 * objects as we go. 2981 */ 2982 current = entry; 2983 while (current && current->ba.start < end) { 2984 offset = current->ba.offset + (start - current->ba.start); 2985 size = (end <= current->ba.end ? end : current->ba.end) - start; 2986 2987 switch(current->maptype) { 2988 case VM_MAPTYPE_SUBMAP: 2989 { 2990 vm_map_t smap; 2991 vm_map_entry_t tentry; 2992 vm_size_t tsize; 2993 2994 smap = current->ba.sub_map; 2995 vm_map_lock_read(smap); 2996 vm_map_lookup_entry(smap, offset, &tentry); 2997 if (tentry == NULL) { 2998 tsize = vm_map_max(smap) - offset; 2999 ba = NULL; 3000 offset = 0 + (offset - vm_map_min(smap)); 3001 } else { 3002 tsize = tentry->ba.end - offset; 3003 ba = &tentry->ba; 3004 offset = tentry->ba.offset + 3005 (offset - tentry->ba.start); 3006 } 3007 vm_map_unlock_read(smap); 3008 if (tsize < size) 3009 size = tsize; 3010 break; 3011 } 3012 case VM_MAPTYPE_NORMAL: 3013 case VM_MAPTYPE_VPAGETABLE: 3014 ba = &current->ba; 3015 break; 3016 default: 3017 ba = NULL; 3018 break; 3019 } 3020 if (ba) { 3021 object = ba->object; 3022 if (object) 3023 vm_object_hold(object); 3024 } else { 3025 object = NULL; 3026 } 3027 3028 /* 3029 * Note that there is absolutely no sense in writing out 3030 * anonymous objects, so we track down the vnode object 3031 * to write out. 3032 * We invalidate (remove) all pages from the address space 3033 * anyway, for semantic correctness. 3034 * 3035 * note: certain anonymous maps, such as MAP_NOSYNC maps, 3036 * may start out with a NULL object. 3037 * 3038 * XXX do we really want to stop at the first backing store 3039 * here if there are more?
XXX 3040 */ 3041 if (ba) { 3042 vm_object_t tobj; 3043 3044 tobj = object; 3045 while (ba->backing_ba != NULL) { 3046 offset -= ba->offset; 3047 ba = ba->backing_ba; 3048 offset += ba->offset; 3049 tobj = ba->object; 3050 if (tobj->size < OFF_TO_IDX(offset + size)) 3051 size = IDX_TO_OFF(tobj->size) - offset; 3052 break; /* XXX this break is not correct */ 3053 } 3054 if (object != tobj) { 3055 if (object) 3056 vm_object_drop(object); 3057 object = tobj; 3058 vm_object_hold(object); 3059 } 3060 } 3061 3062 if (object && (object->type == OBJT_VNODE) && 3063 (current->protection & VM_PROT_WRITE) && 3064 (object->flags & OBJ_NOMSYNC) == 0) { 3065 /* 3066 * Flush pages if writing is allowed, invalidate them 3067 * if invalidation requested. Pages undergoing I/O 3068 * will be ignored by vm_object_page_remove(). 3069 * 3070 * We cannot lock the vnode and then wait for paging 3071 * to complete without deadlocking against vm_fault. 3072 * Instead we simply call vm_object_page_remove() and 3073 * allow it to block internally on a page-by-page 3074 * basis when it encounters pages undergoing async 3075 * I/O. 3076 */ 3077 int flags; 3078 3079 /* no chain wait needed for vnode objects */ 3080 vm_object_reference_locked(object); 3081 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY); 3082 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 3083 flags |= invalidate ? OBJPC_INVAL : 0; 3084 3085 /* 3086 * When operating on a virtual page table just 3087 * flush the whole object. XXX we probably ought 3088 * to 3089 */ 3090 switch(current->maptype) { 3091 case VM_MAPTYPE_NORMAL: 3092 vm_object_page_clean(object, 3093 OFF_TO_IDX(offset), 3094 OFF_TO_IDX(offset + size + PAGE_MASK), 3095 flags); 3096 break; 3097 case VM_MAPTYPE_VPAGETABLE: 3098 vm_object_page_clean(object, 0, 0, flags); 3099 break; 3100 } 3101 vn_unlock(((struct vnode *)object->handle)); 3102 vm_object_deallocate_locked(object); 3103 } 3104 if (object && invalidate && 3105 ((object->type == OBJT_VNODE) || 3106 (object->type == OBJT_DEVICE) || 3107 (object->type == OBJT_MGTDEVICE))) { 3108 int clean_only = 3109 ((object->type == OBJT_DEVICE) || 3110 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE; 3111 /* no chain wait needed for vnode/device objects */ 3112 vm_object_reference_locked(object); 3113 switch(current->maptype) { 3114 case VM_MAPTYPE_NORMAL: 3115 vm_object_page_remove(object, 3116 OFF_TO_IDX(offset), 3117 OFF_TO_IDX(offset + size + PAGE_MASK), 3118 clean_only); 3119 break; 3120 case VM_MAPTYPE_VPAGETABLE: 3121 vm_object_page_remove(object, 0, 0, clean_only); 3122 break; 3123 } 3124 vm_object_deallocate_locked(object); 3125 } 3126 start += size; 3127 if (object) 3128 vm_object_drop(object); 3129 current = vm_map_rb_tree_RB_NEXT(current); 3130 } 3131 3132 lwkt_reltoken(&map->token); 3133 vm_map_unlock_read(map); 3134 3135 return (KERN_SUCCESS); 3136 } 3137 3138 /* 3139 * Make the region specified by this entry pageable. 3140 * 3141 * The vm_map must be exclusively locked. 3142 */ 3143 static void 3144 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3145 { 3146 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3147 entry->wired_count = 0; 3148 vm_fault_unwire(map, entry); 3149 } 3150 3151 /* 3152 * Deallocate the given entry from the target map. 3153 * 3154 * The vm_map must be exclusively locked. 
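 *
 * Note that this helper only unlinks the entry, adjusts map->size
 * and disposes of the entry; removing the pmap mappings and unwiring
 * the pages is the caller's responsibility (see vm_map_delete()
 * below).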
3155 */ 3156 static void 3157 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp) 3158 { 3159 vm_map_entry_unlink(map, entry); 3160 map->size -= entry->ba.end - entry->ba.start; 3161 vm_map_entry_dispose(map, entry, countp); 3162 } 3163 3164 /* 3165 * Deallocates the given address range from the target map. 3166 * 3167 * The vm_map must be exclusively locked. 3168 */ 3169 int 3170 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp) 3171 { 3172 vm_object_t object; 3173 vm_map_entry_t entry; 3174 vm_map_entry_t first_entry; 3175 vm_offset_t hole_start; 3176 3177 ASSERT_VM_MAP_LOCKED(map); 3178 lwkt_gettoken(&map->token); 3179 again: 3180 /* 3181 * Find the start of the region, and clip it. Set entry to point 3182 * at the first record containing the requested address or, if no 3183 * such record exists, the next record with a greater address. The 3184 * loop will run from this point until a record beyond the termination 3185 * address is encountered. 3186 * 3187 * Adjust freehint[] for either the clip case or the extension case. 3188 * 3189 * GGG see other GGG comment. 3190 */ 3191 if (vm_map_lookup_entry(map, start, &first_entry)) { 3192 entry = first_entry; 3193 vm_map_clip_start(map, entry, start, countp); 3194 hole_start = start; 3195 } else { 3196 if (first_entry) { 3197 entry = vm_map_rb_tree_RB_NEXT(first_entry); 3198 if (entry == NULL) 3199 hole_start = first_entry->ba.start; 3200 else 3201 hole_start = first_entry->ba.end; 3202 } else { 3203 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 3204 if (entry == NULL) 3205 hole_start = vm_map_min(map); 3206 else 3207 hole_start = vm_map_max(map); 3208 } 3209 } 3210 3211 /* 3212 * Step through all entries in this region 3213 */ 3214 while (entry && entry->ba.start < end) { 3215 vm_map_entry_t next; 3216 vm_offset_t s, e; 3217 vm_pindex_t offidxstart, offidxend, count; 3218 3219 /* 3220 * If we hit an in-transition entry we have to sleep and 3221 * retry. It's easier (and not really slower) to just retry 3222 * since this case occurs so rarely and the hint is already 3223 * pointing at the right place. We have to reset the 3224 * start offset so as not to accidently delete an entry 3225 * another process just created in vacated space. 3226 */ 3227 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3228 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3229 start = entry->ba.start; 3230 ++mycpu->gd_cnt.v_intrans_coll; 3231 ++mycpu->gd_cnt.v_intrans_wait; 3232 vm_map_transition_wait(map, 1); 3233 goto again; 3234 } 3235 vm_map_clip_end(map, entry, end, countp); 3236 3237 s = entry->ba.start; 3238 e = entry->ba.end; 3239 next = vm_map_rb_tree_RB_NEXT(entry); 3240 3241 offidxstart = OFF_TO_IDX(entry->ba.offset); 3242 count = OFF_TO_IDX(e - s); 3243 3244 switch(entry->maptype) { 3245 case VM_MAPTYPE_NORMAL: 3246 case VM_MAPTYPE_VPAGETABLE: 3247 case VM_MAPTYPE_SUBMAP: 3248 object = entry->ba.object; 3249 break; 3250 default: 3251 object = NULL; 3252 break; 3253 } 3254 3255 /* 3256 * Unwire before removing addresses from the pmap; otherwise, 3257 * unwiring will put the entries back in the pmap. 3258 * 3259 * Generally speaking, doing a bulk pmap_remove() before 3260 * removing the pages from the VM object is better at 3261 * reducing unnecessary IPIs. The pmap code is now optimized 3262 * to not blindly iterate the range when pt and pd pages 3263 * are missing. 
3264 */ 3265 if (entry->wired_count != 0) 3266 vm_map_entry_unwire(map, entry); 3267 3268 offidxend = offidxstart + count; 3269 3270 if (object == &kernel_object) { 3271 pmap_remove(map->pmap, s, e); 3272 vm_object_hold(object); 3273 vm_object_page_remove(object, offidxstart, 3274 offidxend, FALSE); 3275 vm_object_drop(object); 3276 } else if (object && object->type != OBJT_DEFAULT && 3277 object->type != OBJT_SWAP) { 3278 /* 3279 * vnode object routines cannot be chain-locked, 3280 * but since we aren't removing pages from the 3281 * object here we can use a shared hold. 3282 */ 3283 vm_object_hold_shared(object); 3284 pmap_remove(map->pmap, s, e); 3285 vm_object_drop(object); 3286 } else if (object) { 3287 vm_object_hold(object); 3288 pmap_remove(map->pmap, s, e); 3289 3290 if (object != NULL && 3291 object->ref_count != 1 && 3292 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == 3293 OBJ_ONEMAPPING && 3294 (object->type == OBJT_DEFAULT || 3295 object->type == OBJT_SWAP)) { 3296 /* 3297 * When ONEMAPPING is set we can destroy the 3298 * pages underlying the entry's range. 3299 */ 3300 vm_object_page_remove(object, offidxstart, 3301 offidxend, FALSE); 3302 if (object->type == OBJT_SWAP) { 3303 swap_pager_freespace(object, 3304 offidxstart, 3305 count); 3306 } 3307 if (offidxend >= object->size && 3308 offidxstart < object->size) { 3309 object->size = offidxstart; 3310 } 3311 } 3312 vm_object_drop(object); 3313 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) { 3314 pmap_remove(map->pmap, s, e); 3315 } 3316 3317 /* 3318 * Delete the entry (which may delete the object) only after 3319 * removing all pmap entries pointing to its pages. 3320 * (Otherwise, its page frames may be reallocated, and any 3321 * modify bits will be set in the wrong object!) 3322 */ 3323 vm_map_entry_delete(map, entry, countp); 3324 entry = next; 3325 } 3326 3327 /* 3328 * We either reached the end and use vm_map_max as the end 3329 * address, or we didn't and we use the next entry as the 3330 * end address. 3331 */ 3332 if (entry == NULL) { 3333 vm_map_freehint_hole(map, hole_start, 3334 vm_map_max(map) - hole_start); 3335 } else { 3336 vm_map_freehint_hole(map, hole_start, 3337 entry->ba.start - hole_start); 3338 } 3339 3340 lwkt_reltoken(&map->token); 3341 3342 return (KERN_SUCCESS); 3343 } 3344 3345 /* 3346 * Remove the given address range from the target map. 3347 * This is the exported form of vm_map_delete. 3348 * 3349 * No requirements. 3350 */ 3351 int 3352 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3353 { 3354 int result; 3355 int count; 3356 3357 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3358 vm_map_lock(map); 3359 VM_MAP_RANGE_CHECK(map, start, end); 3360 result = vm_map_delete(map, start, end, &count); 3361 vm_map_unlock(map); 3362 vm_map_entry_release(count); 3363 3364 return (result); 3365 } 3366 3367 /* 3368 * Assert that the target map allows the specified privilege on the 3369 * entire address region given. The entire region must be allocated. 3370 * 3371 * The caller must specify whether the vm_map is already locked or not. 
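 *
 * Hedged usage sketch ("vm", "va" and "len" are placeholders):
 *
 *	if (vm_map_check_protection(&vm->vm_map, va, va + len,
 *				    VM_PROT_READ, FALSE) == FALSE)
 *		return (EFAULT);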
3372 */ 3373 boolean_t 3374 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3375 vm_prot_t protection, boolean_t have_lock) 3376 { 3377 vm_map_entry_t entry; 3378 vm_map_entry_t tmp_entry; 3379 boolean_t result; 3380 3381 if (have_lock == FALSE) 3382 vm_map_lock_read(map); 3383 3384 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 3385 if (have_lock == FALSE) 3386 vm_map_unlock_read(map); 3387 return (FALSE); 3388 } 3389 entry = tmp_entry; 3390 3391 result = TRUE; 3392 while (start < end) { 3393 if (entry == NULL) { 3394 result = FALSE; 3395 break; 3396 } 3397 3398 /* 3399 * No holes allowed! 3400 */ 3401 3402 if (start < entry->ba.start) { 3403 result = FALSE; 3404 break; 3405 } 3406 /* 3407 * Check protection associated with entry. 3408 */ 3409 3410 if ((entry->protection & protection) != protection) { 3411 result = FALSE; 3412 break; 3413 } 3414 /* go to next entry */ 3415 start = entry->ba.end; 3416 entry = vm_map_rb_tree_RB_NEXT(entry); 3417 } 3418 if (have_lock == FALSE) 3419 vm_map_unlock_read(map); 3420 return (result); 3421 } 3422 3423 /* 3424 * vm_map_backing structures are not shared across forks and must be 3425 * replicated. 3426 * 3427 * Generally speaking we must reallocate the backing_ba sequence and 3428 * also adjust it for any changes made to the base entry->ba.start and 3429 * entry->ba.end. The first ba in the chain is of course &entry->ba, 3430 * so we only need to adjust subsequent ba's start, end, and offset. 3431 * 3432 * MAP_BACK_CLIPPED - Called as part of a clipping replication. 3433 * Do not clear OBJ_ONEMAPPING. 3434 * 3435 * MAP_BACK_BASEOBJREFD - Called from vm_map_insert(). The base object 3436 * has already been referenced. 3437 */ 3438 static 3439 void 3440 vm_map_backing_replicated(vm_map_t map, vm_map_entry_t entry, int flags) 3441 { 3442 vm_map_backing_t ba; 3443 vm_map_backing_t nba; 3444 vm_object_t object; 3445 3446 ba = &entry->ba; 3447 for (;;) { 3448 object = ba->object; 3449 ba->pmap = map->pmap; 3450 if (object && 3451 (entry->maptype == VM_MAPTYPE_VPAGETABLE || 3452 entry->maptype == VM_MAPTYPE_NORMAL)) { 3453 if (ba != &entry->ba || 3454 (flags & MAP_BACK_BASEOBJREFD) == 0) { 3455 vm_object_reference_quick(object); 3456 } 3457 vm_map_backing_attach(entry, ba); 3458 if ((flags & MAP_BACK_CLIPPED) == 0 && 3459 object->ref_count > 1) { 3460 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3461 } 3462 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) { 3463 vm_map_backing_attach(entry, ba); 3464 } 3465 if (ba->backing_ba == NULL) 3466 break; 3467 3468 /* 3469 * NOTE: The aux_info field is retained. 
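 *
 *	 The offset adjustment performed below keeps each copied
 *	 backing ba describing the same pages as before: when the
 *	 (already adjusted) level above starts N bytes further into
 *	 the original range, the copied ba's offset advances by the
 *	 same N and its start/end are narrowed to match, i.e.
 *	 new offset = old offset + (new start - old start).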
3470 */ 3471 nba = kmalloc(sizeof(*nba), M_MAP_BACKING, M_INTWAIT); 3472 *nba = *ba->backing_ba; 3473 nba->offset += (ba->start - nba->start); /* += (new - old) */ 3474 nba->start = ba->start; 3475 nba->end = ba->end; 3476 ba->backing_ba = nba; 3477 ba = nba; 3478 /* pmap is replaced at the top of the loop */ 3479 } 3480 } 3481 3482 static 3483 void 3484 vm_map_backing_adjust_start(vm_map_entry_t entry, vm_ooffset_t start) 3485 { 3486 vm_map_backing_t ba; 3487 3488 if (entry->maptype == VM_MAPTYPE_VPAGETABLE || 3489 entry->maptype == VM_MAPTYPE_NORMAL) { 3490 for (ba = &entry->ba; ba; ba = ba->backing_ba) { 3491 if (ba->object) { 3492 lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE); 3493 ba->offset += (start - ba->start); 3494 ba->start = start; 3495 lockmgr(&ba->object->backing_lk, LK_RELEASE); 3496 } else { 3497 ba->offset += (start - ba->start); 3498 ba->start = start; 3499 } 3500 } 3501 } else { 3502 /* not an object and can't be shadowed */ 3503 } 3504 } 3505 3506 static 3507 void 3508 vm_map_backing_adjust_end(vm_map_entry_t entry, vm_ooffset_t end) 3509 { 3510 vm_map_backing_t ba; 3511 3512 if (entry->maptype == VM_MAPTYPE_VPAGETABLE || 3513 entry->maptype == VM_MAPTYPE_NORMAL) { 3514 for (ba = &entry->ba; ba; ba = ba->backing_ba) { 3515 if (ba->object) { 3516 lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE); 3517 ba->end = end; 3518 lockmgr(&ba->object->backing_lk, LK_RELEASE); 3519 } else { 3520 ba->end = end; 3521 } 3522 } 3523 } else { 3524 /* not an object and can't be shadowed */ 3525 } 3526 } 3527 3528 /* 3529 * Handles the dirty work of making src_entry and dst_entry copy-on-write 3530 * after src_entry has been cloned to dst_entry. For normal entries only. 3531 * 3532 * The vm_maps must be exclusively locked. 3533 * The vm_map's token must be held. 3534 * 3535 * Because the maps are locked no faults can be in progress during the 3536 * operation. 3537 */ 3538 static void 3539 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map, 3540 vm_map_entry_t src_entry, vm_map_entry_t dst_entry) 3541 { 3542 vm_object_t obj; 3543 3544 KKASSERT(dst_entry->maptype == VM_MAPTYPE_NORMAL || 3545 dst_entry->maptype == VM_MAPTYPE_VPAGETABLE); 3546 3547 if (src_entry->wired_count && 3548 src_entry->maptype != VM_MAPTYPE_VPAGETABLE) { 3549 /* 3550 * Of course, wired down pages can't be set copy-on-write. 3551 * Cause wired pages to be copied into the new map by 3552 * simulating faults (the new pages are pageable) 3553 * 3554 * Scrap ba.object (its ref-count has not yet been adjusted 3555 * so we can just NULL out the field). Remove the backing 3556 * store. 3557 * 3558 * Then call vm_fault_copy_entry() to create a new object 3559 * in dst_entry and copy the wired pages from src to dst. 3560 * 3561 * The fault-copy code doesn't work with virtual page 3562 * tables. 3563 * 3564 * NOTE: obj is not actually an object for all MAPTYPEs, 3565 * just test against NULL. 3566 */ 3567 if (dst_entry->ba.map_object != NULL) { 3568 vm_map_backing_detach(dst_entry, &dst_entry->ba); 3569 dst_entry->ba.map_object = NULL; 3570 vm_map_entry_dispose_ba(dst_entry, 3571 dst_entry->ba.backing_ba); 3572 dst_entry->ba.backing_ba = NULL; 3573 dst_entry->ba.backing_count = 0; 3574 } 3575 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 3576 } else { 3577 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 3578 /* 3579 * If the source entry is not already marked NEEDS_COPY 3580 * we need to write-protect the PTEs. 
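 * Removing write permission from the pmap means the first write
 * by either the parent or the child faults, and the fault path
 * then performs the actual copy; both entries are marked
 * MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY just below when an object
 * is present.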
3581 */ 3582 pmap_protect(src_map->pmap, 3583 src_entry->ba.start, 3584 src_entry->ba.end, 3585 src_entry->protection & ~VM_PROT_WRITE); 3586 } 3587 3588 /* 3589 * dst_entry->ba.object might be stale. Update it (its 3590 * ref-count has not yet been updated so just overwrite 3591 * the field). 3592 * 3593 * If there is no object then we are golden. Also, in 3594 * this situation if there are no backing_ba linkages then 3595 * we can set ba.offset to whatever we want. For now we 3596 * set the offset to 0 to make debugging object sizes 3597 * easier. 3598 */ 3599 obj = src_entry->ba.object; 3600 3601 if (obj) { 3602 src_entry->eflags |= (MAP_ENTRY_COW | 3603 MAP_ENTRY_NEEDS_COPY); 3604 dst_entry->eflags |= (MAP_ENTRY_COW | 3605 MAP_ENTRY_NEEDS_COPY); 3606 KKASSERT(dst_entry->ba.offset == src_entry->ba.offset); 3607 } else { 3608 dst_entry->ba.offset = 0; 3609 } 3610 3611 /* 3612 * Normal, allow the backing_ba link depth to 3613 * increase. 3614 */ 3615 pmap_copy(dst_map->pmap, src_map->pmap, 3616 dst_entry->ba.start, 3617 dst_entry->ba.end - dst_entry->ba.start, 3618 src_entry->ba.start); 3619 } 3620 } 3621 3622 /* 3623 * Create a vmspace for a new process and its related vm_map based on an 3624 * existing vmspace. The new map inherits information from the old map 3625 * according to inheritance settings. 3626 * 3627 * The source map must not be locked. 3628 * No requirements. 3629 */ 3630 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3631 vm_map_entry_t old_entry, int *countp); 3632 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3633 vm_map_entry_t old_entry, int *countp); 3634 3635 struct vmspace * 3636 vmspace_fork(struct vmspace *vm1) 3637 { 3638 struct vmspace *vm2; 3639 vm_map_t old_map = &vm1->vm_map; 3640 vm_map_t new_map; 3641 vm_map_entry_t old_entry; 3642 int count; 3643 3644 lwkt_gettoken(&vm1->vm_map.token); 3645 vm_map_lock(old_map); 3646 3647 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map)); 3648 lwkt_gettoken(&vm2->vm_map.token); 3649 3650 /* 3651 * We must bump the timestamp to force any concurrent fault 3652 * to retry. 3653 */ 3654 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 3655 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy); 3656 new_map = &vm2->vm_map; /* XXX */ 3657 new_map->timestamp = 1; 3658 3659 vm_map_lock(new_map); 3660 3661 count = old_map->nentries; 3662 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT); 3663 3664 RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) { 3665 switch(old_entry->maptype) { 3666 case VM_MAPTYPE_SUBMAP: 3667 panic("vm_map_fork: encountered a submap"); 3668 break; 3669 case VM_MAPTYPE_UKSMAP: 3670 vmspace_fork_uksmap_entry(old_map, new_map, 3671 old_entry, &count); 3672 break; 3673 case VM_MAPTYPE_NORMAL: 3674 case VM_MAPTYPE_VPAGETABLE: 3675 vmspace_fork_normal_entry(old_map, new_map, 3676 old_entry, &count); 3677 break; 3678 } 3679 } 3680 3681 new_map->size = old_map->size; 3682 vm_map_unlock(new_map); 3683 vm_map_unlock(old_map); 3684 vm_map_entry_release(count); 3685 3686 lwkt_reltoken(&vm2->vm_map.token); 3687 lwkt_reltoken(&vm1->vm_map.token); 3688 3689 return (vm2); 3690 } 3691 3692 static 3693 void 3694 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3695 vm_map_entry_t old_entry, int *countp) 3696 { 3697 vm_map_entry_t new_entry; 3698 vm_map_backing_t ba; 3699 vm_object_t object; 3700 3701 /* 3702 * If the backing_ba link list gets too long then fault it 3703 * all into the head object and dispose of the list.
We do 3704 * this in old_entry prior to cloning in order to benefit both 3705 * parent and child. 3706 * 3707 * We can test our fronting object's size against its 3708 * resident_page_count for a really cheap (but probably not perfect) 3709 * all-shadowed test, allowing us to disconnect the backing_ba 3710 * link list early. 3711 * 3712 * XXX Currently doesn't work for VPAGETABLEs (the entire object 3713 * would have to be copied). 3714 */ 3715 object = old_entry->ba.object; 3716 if (old_entry->ba.backing_ba && 3717 old_entry->maptype != VM_MAPTYPE_VPAGETABLE && 3718 (old_entry->ba.backing_count >= vm_map_backing_limit || 3719 (vm_map_backing_shadow_test && object && 3720 object->size == object->resident_page_count))) { 3721 /* 3722 * If there are too many backing_ba linkages we 3723 * collapse everything into the head 3724 * 3725 * This will also remove all the pte's. 3726 */ 3727 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) 3728 vm_map_entry_shadow(old_entry); 3729 if (object == NULL) 3730 vm_map_entry_allocate_object(old_entry); 3731 if (vm_fault_collapse(old_map, old_entry) == KERN_SUCCESS) { 3732 ba = old_entry->ba.backing_ba; 3733 old_entry->ba.backing_ba = NULL; 3734 old_entry->ba.backing_count = 0; 3735 vm_map_entry_dispose_ba(old_entry, ba); 3736 } 3737 } 3738 object = NULL; /* object variable is now invalid */ 3739 3740 /* 3741 * Fork the entry 3742 */ 3743 switch (old_entry->inheritance) { 3744 case VM_INHERIT_NONE: 3745 break; 3746 case VM_INHERIT_SHARE: 3747 /* 3748 * Clone the entry as a shared entry. This will look like 3749 * shared memory across the old and the new process. We must 3750 * ensure that the object is allocated. 3751 */ 3752 if (old_entry->ba.object == NULL) 3753 vm_map_entry_allocate_object(old_entry); 3754 3755 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3756 /* 3757 * Create the fronting vm_map_backing for 3758 * an entry which needs a copy, plus an extra 3759 * ref because we are going to duplicate it 3760 * in the fork. 3761 * 3762 * The call to vm_map_entry_shadow() will also clear 3763 * OBJ_ONEMAPPING. 3764 * 3765 * XXX no more collapse. Still need extra ref 3766 * for the fork. 3767 */ 3768 vm_map_entry_shadow(old_entry); 3769 } else if (old_entry->ba.object) { 3770 object = old_entry->ba.object; 3771 } 3772 3773 /* 3774 * Clone the entry. We've already bumped the ref on 3775 * the vm_object for our new entry. 3776 */ 3777 new_entry = vm_map_entry_create(countp); 3778 *new_entry = *old_entry; 3779 3780 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3781 new_entry->wired_count = 0; 3782 3783 /* 3784 * Replicate and index the vm_map_backing. Don't share 3785 * the vm_map_backing across vm_map's (only across clips). 3786 * 3787 * Insert the entry into the new map -- we know we're 3788 * inserting at the end of the new map. 3789 */ 3790 vm_map_backing_replicated(new_map, new_entry, 0); 3791 vm_map_entry_link(new_map, new_entry); 3792 3793 /* 3794 * Update the physical map 3795 */ 3796 pmap_copy(new_map->pmap, old_map->pmap, 3797 new_entry->ba.start, 3798 (old_entry->ba.end - old_entry->ba.start), 3799 old_entry->ba.start); 3800 break; 3801 case VM_INHERIT_COPY: 3802 /* 3803 * Clone the entry and link the copy into the new map. 3804 * 3805 * Note that ref-counting adjustment for old_entry->ba.object 3806 * (if it isn't a special map that is) is handled by 3807 * vm_map_copy_entry(). 
3808 */ 3809 new_entry = vm_map_entry_create(countp); 3810 *new_entry = *old_entry; 3811 3812 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3813 new_entry->wired_count = 0; 3814 3815 vm_map_backing_replicated(new_map, new_entry, 0); 3816 vm_map_entry_link(new_map, new_entry); 3817 3818 /* 3819 * This does the actual dirty work of making both entries 3820 * copy-on-write, and will also handle the fronting object. 3821 */ 3822 vm_map_copy_entry(old_map, new_map, old_entry, new_entry); 3823 break; 3824 } 3825 } 3826 3827 /* 3828 * When forking user-kernel shared maps, the map might change in the 3829 * child so do not try to copy the underlying pmap entries. 3830 */ 3831 static 3832 void 3833 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3834 vm_map_entry_t old_entry, int *countp) 3835 { 3836 vm_map_entry_t new_entry; 3837 3838 new_entry = vm_map_entry_create(countp); 3839 *new_entry = *old_entry; 3840 3841 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3842 new_entry->wired_count = 0; 3843 KKASSERT(new_entry->ba.backing_ba == NULL); 3844 vm_map_backing_replicated(new_map, new_entry, 0); 3845 3846 vm_map_entry_link(new_map, new_entry); 3847 } 3848 3849 /* 3850 * Create an auto-grow stack entry 3851 * 3852 * No requirements. 3853 */ 3854 int 3855 vm_map_stack (vm_map_t map, vm_offset_t *addrbos, vm_size_t max_ssize, 3856 int flags, vm_prot_t prot, vm_prot_t max, int cow) 3857 { 3858 vm_map_entry_t prev_entry; 3859 vm_map_entry_t next; 3860 vm_size_t init_ssize; 3861 int rv; 3862 int count; 3863 vm_offset_t tmpaddr; 3864 3865 cow |= MAP_IS_STACK; 3866 3867 if (max_ssize < sgrowsiz) 3868 init_ssize = max_ssize; 3869 else 3870 init_ssize = sgrowsiz; 3871 3872 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3873 vm_map_lock(map); 3874 3875 /* 3876 * Find space for the mapping 3877 */ 3878 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 3879 if (vm_map_findspace(map, *addrbos, max_ssize, 1, 3880 flags, &tmpaddr)) { 3881 vm_map_unlock(map); 3882 vm_map_entry_release(count); 3883 return (KERN_NO_SPACE); 3884 } 3885 *addrbos = tmpaddr; 3886 } 3887 3888 /* If addr is already mapped, no go */ 3889 if (vm_map_lookup_entry(map, *addrbos, &prev_entry)) { 3890 vm_map_unlock(map); 3891 vm_map_entry_release(count); 3892 return (KERN_NO_SPACE); 3893 } 3894 3895 #if 0 3896 /* XXX already handled by kern_mmap() */ 3897 /* If we would blow our VMEM resource limit, no go */ 3898 if (map->size + init_ssize > 3899 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3900 vm_map_unlock(map); 3901 vm_map_entry_release(count); 3902 return (KERN_NO_SPACE); 3903 } 3904 #endif 3905 3906 /* 3907 * If we can't accommodate max_ssize in the current mapping, 3908 * no go. However, we need to be aware that subsequent user 3909 * mappings might map into the space we have reserved for 3910 * stack, and currently this space is not protected. 3911 * 3912 * Hopefully we will at least detect this condition 3913 * when we try to grow the stack. 3914 */ 3915 if (prev_entry) 3916 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3917 else 3918 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3919 3920 if (next && next->ba.start < *addrbos + max_ssize) { 3921 vm_map_unlock(map); 3922 vm_map_entry_release(count); 3923 return (KERN_NO_SPACE); 3924 } 3925 3926 /* 3927 * We initially map a stack of only init_ssize. We will 3928 * grow as needed later. Since this is to be a grow 3929 * down stack, we map at the top of the range. 3930 * 3931 * Note: we would normally expect prot and max to be 3932 * VM_PROT_ALL, and cow to be 0.
Possibly we should 3933 * eliminate these as input parameters, and just 3934 * pass these values here in the insert call. 3935 */ 3936 rv = vm_map_insert(map, &count, 3937 NULL, NULL, 3938 0, NULL, 3939 *addrbos + max_ssize - init_ssize, 3940 *addrbos + max_ssize, 3941 VM_MAPTYPE_NORMAL, 3942 VM_SUBSYS_STACK, prot, max, cow); 3943 3944 /* Now set the avail_ssize amount */ 3945 if (rv == KERN_SUCCESS) { 3946 if (prev_entry) 3947 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3948 else 3949 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3950 if (prev_entry != NULL) { 3951 vm_map_clip_end(map, 3952 prev_entry, 3953 *addrbos + max_ssize - init_ssize, 3954 &count); 3955 } 3956 if (next->ba.end != *addrbos + max_ssize || 3957 next->ba.start != *addrbos + max_ssize - init_ssize){ 3958 panic ("Bad entry start/end for new stack entry"); 3959 } else { 3960 next->aux.avail_ssize = max_ssize - init_ssize; 3961 } 3962 } 3963 3964 vm_map_unlock(map); 3965 vm_map_entry_release(count); 3966 return (rv); 3967 } 3968 3969 /* 3970 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3971 * desired address is already mapped, or if we successfully grow 3972 * the stack. Also returns KERN_SUCCESS if addr is outside the 3973 * stack range (this is strange, but preserves compatibility with 3974 * the grow function in vm_machdep.c). 3975 * 3976 * No requirements. 3977 */ 3978 int 3979 vm_map_growstack (vm_map_t map, vm_offset_t addr) 3980 { 3981 vm_map_entry_t prev_entry; 3982 vm_map_entry_t stack_entry; 3983 vm_map_entry_t next; 3984 struct vmspace *vm; 3985 struct lwp *lp; 3986 struct proc *p; 3987 vm_offset_t end; 3988 int grow_amount; 3989 int rv = KERN_SUCCESS; 3990 int is_procstack; 3991 int use_read_lock = 1; 3992 int count; 3993 3994 /* 3995 * Find the vm 3996 */ 3997 lp = curthread->td_lwp; 3998 p = curthread->td_proc; 3999 KKASSERT(lp != NULL); 4000 vm = lp->lwp_vmspace; 4001 4002 /* 4003 * Growstack is only allowed on the current process. We disallow 4004 * other use cases, e.g. trying to access memory via procfs that 4005 * the stack hasn't grown into. 4006 */ 4007 if (map != &vm->vm_map) { 4008 return KERN_FAILURE; 4009 } 4010 4011 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 4012 Retry: 4013 if (use_read_lock) 4014 vm_map_lock_read(map); 4015 else 4016 vm_map_lock(map); 4017 4018 /* 4019 * If addr is already in the entry range, no need to grow. 4020 * prev_entry returns NULL if addr is at the head. 4021 */ 4022 if (vm_map_lookup_entry(map, addr, &prev_entry)) 4023 goto done; 4024 if (prev_entry) 4025 stack_entry = vm_map_rb_tree_RB_NEXT(prev_entry); 4026 else 4027 stack_entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 4028 4029 if (stack_entry == NULL) 4030 goto done; 4031 if (prev_entry == NULL) 4032 end = stack_entry->ba.start - stack_entry->aux.avail_ssize; 4033 else 4034 end = prev_entry->ba.end; 4035 4036 /* 4037 * This next test mimics the old grow function in vm_machdep.c. 4038 * It really doesn't quite make sense, but we do it anyway 4039 * for compatibility. 4040 * 4041 * If not growable stack, return success. This signals the 4042 * caller to proceed as he would normally with normal vm. 
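 * Concretely: we return KERN_SUCCESS without growing anything when
 * the entry is not a growable stack (avail_ssize < 1), or when addr
 * does not fall inside the reserved growth range
 * [ba.start - avail_ssize, ba.start).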
4043 */ 4044 if (stack_entry->aux.avail_ssize < 1 || 4045 addr >= stack_entry->ba.start || 4046 addr < stack_entry->ba.start - stack_entry->aux.avail_ssize) { 4047 goto done; 4048 } 4049 4050 /* Find the minimum grow amount */ 4051 grow_amount = roundup (stack_entry->ba.start - addr, PAGE_SIZE); 4052 if (grow_amount > stack_entry->aux.avail_ssize) { 4053 rv = KERN_NO_SPACE; 4054 goto done; 4055 } 4056 4057 /* 4058 * If there is no longer enough space between the entries, 4059 * no go; adjust the available space. Note: this 4060 * should only happen if the user has mapped into the 4061 * stack area after the stack was created, and is 4062 * probably an error. 4063 * 4064 * This also effectively destroys any guard page the user 4065 * might have intended by limiting the stack size. 4066 */ 4067 if (grow_amount > stack_entry->ba.start - end) { 4068 if (use_read_lock && vm_map_lock_upgrade(map)) { 4069 /* lost lock */ 4070 use_read_lock = 0; 4071 goto Retry; 4072 } 4073 use_read_lock = 0; 4074 stack_entry->aux.avail_ssize = stack_entry->ba.start - end; 4075 rv = KERN_NO_SPACE; 4076 goto done; 4077 } 4078 4079 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 4080 4081 /* If this is the main process stack, see if we're over the 4082 * stack limit. 4083 */ 4084 if (is_procstack && (vm->vm_ssize + grow_amount > 4085 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 4086 rv = KERN_NO_SPACE; 4087 goto done; 4088 } 4089 4090 /* Round up the grow amount to a multiple of sgrowsiz */ 4091 grow_amount = roundup (grow_amount, sgrowsiz); 4092 if (grow_amount > stack_entry->aux.avail_ssize) { 4093 grow_amount = stack_entry->aux.avail_ssize; 4094 } 4095 if (is_procstack && (vm->vm_ssize + grow_amount > 4096 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 4097 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - vm->vm_ssize; 4098 } 4099 4100 /* If we would blow our VMEM resource limit, no go */ 4101 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 4102 rv = KERN_NO_SPACE; 4103 goto done; 4104 } 4105 4106 if (use_read_lock && vm_map_lock_upgrade(map)) { 4107 /* lost lock */ 4108 use_read_lock = 0; 4109 goto Retry; 4110 } 4111 use_read_lock = 0; 4112 4113 /* Get the preliminary new entry start value */ 4114 addr = stack_entry->ba.start - grow_amount; 4115 4116 /* If this puts us into the previous entry, cut back our growth 4117 * to the available space. Also, see the note above. 4118 */ 4119 if (addr < end) { 4120 stack_entry->aux.avail_ssize = stack_entry->ba.start - end; 4121 addr = end; 4122 } 4123 4124 rv = vm_map_insert(map, &count, 4125 NULL, NULL, 4126 0, NULL, 4127 addr, stack_entry->ba.start, 4128 VM_MAPTYPE_NORMAL, 4129 VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0); 4130 4131 /* Adjust the available stack space by the amount we grew.
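 * The newly inserted entry covers [addr, stack_entry->ba.start); its
 * avail_ssize is the old entry's avail_ssize minus the size just
 * inserted, and for the main process stack vm_ssize is charged by the
 * same amount.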
*/ 4132 if (rv == KERN_SUCCESS) { 4133 if (prev_entry) { 4134 vm_map_clip_end(map, prev_entry, addr, &count); 4135 next = vm_map_rb_tree_RB_NEXT(prev_entry); 4136 } else { 4137 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 4138 } 4139 if (next->ba.end != stack_entry->ba.start || 4140 next->ba.start != addr) { 4141 panic ("Bad stack grow start/end in new stack entry"); 4142 } else { 4143 next->aux.avail_ssize = 4144 stack_entry->aux.avail_ssize - 4145 (next->ba.end - next->ba.start); 4146 if (is_procstack) { 4147 vm->vm_ssize += next->ba.end - 4148 next->ba.start; 4149 } 4150 } 4151 4152 if (map->flags & MAP_WIREFUTURE) 4153 vm_map_unwire(map, next->ba.start, next->ba.end, FALSE); 4154 } 4155 4156 done: 4157 if (use_read_lock) 4158 vm_map_unlock_read(map); 4159 else 4160 vm_map_unlock(map); 4161 vm_map_entry_release(count); 4162 return (rv); 4163 } 4164 4165 /* 4166 * Unshare the specified VM space for exec. If other processes are 4167 * mapped to it, then create a new one. The new vmspace is null. 4168 * 4169 * No requirements. 4170 */ 4171 void 4172 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 4173 { 4174 struct vmspace *oldvmspace = p->p_vmspace; 4175 struct vmspace *newvmspace; 4176 vm_map_t map = &p->p_vmspace->vm_map; 4177 4178 /* 4179 * If we are execing a resident vmspace we fork it, otherwise 4180 * we create a new vmspace. Note that exitingcnt is not 4181 * copied to the new vmspace. 4182 */ 4183 lwkt_gettoken(&oldvmspace->vm_map.token); 4184 if (vmcopy) { 4185 newvmspace = vmspace_fork(vmcopy); 4186 lwkt_gettoken(&newvmspace->vm_map.token); 4187 } else { 4188 newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map)); 4189 lwkt_gettoken(&newvmspace->vm_map.token); 4190 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 4191 (caddr_t)&oldvmspace->vm_endcopy - 4192 (caddr_t)&oldvmspace->vm_startcopy); 4193 } 4194 4195 /* 4196 * Finish initializing the vmspace before assigning it 4197 * to the process. The vmspace will become the current vmspace 4198 * if p == curproc. 4199 */ 4200 pmap_pinit2(vmspace_pmap(newvmspace)); 4201 pmap_replacevm(p, newvmspace, 0); 4202 lwkt_reltoken(&newvmspace->vm_map.token); 4203 lwkt_reltoken(&oldvmspace->vm_map.token); 4204 vmspace_rel(oldvmspace); 4205 } 4206 4207 /* 4208 * Unshare the specified VM space for forcing COW. This 4209 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4210 */ 4211 void 4212 vmspace_unshare(struct proc *p) 4213 { 4214 struct vmspace *oldvmspace = p->p_vmspace; 4215 struct vmspace *newvmspace; 4216 4217 lwkt_gettoken(&oldvmspace->vm_map.token); 4218 if (vmspace_getrefs(oldvmspace) == 1) { 4219 lwkt_reltoken(&oldvmspace->vm_map.token); 4220 return; 4221 } 4222 newvmspace = vmspace_fork(oldvmspace); 4223 lwkt_gettoken(&newvmspace->vm_map.token); 4224 pmap_pinit2(vmspace_pmap(newvmspace)); 4225 pmap_replacevm(p, newvmspace, 0); 4226 lwkt_reltoken(&newvmspace->vm_map.token); 4227 lwkt_reltoken(&oldvmspace->vm_map.token); 4228 vmspace_rel(oldvmspace); 4229 } 4230 4231 /* 4232 * vm_map_hint: return the beginning of the best area suitable for 4233 * creating a new mapping with "prot" protection. 4234 * 4235 * No requirements. 4236 */ 4237 vm_offset_t 4238 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot) 4239 { 4240 struct vmspace *vms = p->p_vmspace; 4241 struct rlimit limit; 4242 rlim_t dsiz; 4243 4244 /* 4245 * Acquire datasize limit for mmap() operation, 4246 * calculate nearest power of 2. 
4247 */ 4248 if (kern_getrlimit(RLIMIT_DATA, &limit)) 4249 limit.rlim_cur = maxdsiz; 4250 dsiz = limit.rlim_cur; 4251 4252 if (!randomize_mmap || addr != 0) { 4253 /* 4254 * Set a reasonable start point for the hint if it was 4255 * not specified or if it falls within the heap space. 4256 * Hinted mmap()s do not allocate out of the heap space. 4257 */ 4258 if (addr == 0 || 4259 (addr >= round_page((vm_offset_t)vms->vm_taddr) && 4260 addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) { 4261 addr = round_page((vm_offset_t)vms->vm_daddr + dsiz); 4262 } 4263 4264 return addr; 4265 } 4266 4267 /* 4268 * randomize_mmap && addr == 0. For now randomize the 4269 * address within a dsiz range beyond the data limit. 4270 */ 4271 addr = (vm_offset_t)vms->vm_daddr + dsiz; 4272 if (dsiz) 4273 addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz; 4274 return (round_page(addr)); 4275 } 4276 4277 /* 4278 * Finds the VM object, offset, and protection for a given virtual address 4279 * in the specified map, assuming a page fault of the type specified. 4280 * 4281 * Leaves the map in question locked for read; return values are guaranteed 4282 * until a vm_map_lookup_done call is performed. Note that the map argument 4283 * is in/out; the returned map must be used in the call to vm_map_lookup_done. 4284 * 4285 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make 4286 * that fast. 4287 * 4288 * If a lookup is requested with "write protection" specified, the map may 4289 * be changed to perform virtual copying operations, although the data 4290 * referenced will remain the same. 4291 * 4292 * No requirements. 4293 */ 4294 int 4295 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4296 vm_offset_t vaddr, 4297 vm_prot_t fault_typea, 4298 vm_map_entry_t *out_entry, /* OUT */ 4299 struct vm_map_backing **bap, /* OUT */ 4300 vm_pindex_t *pindex, /* OUT */ 4301 vm_prot_t *out_prot, /* OUT */ 4302 int *wflags) /* OUT */ 4303 { 4304 vm_map_entry_t entry; 4305 vm_map_t map = *var_map; 4306 vm_prot_t prot; 4307 vm_prot_t fault_type = fault_typea; 4308 int use_read_lock = 1; 4309 int rv = KERN_SUCCESS; 4310 int count; 4311 thread_t td = curthread; 4312 4313 /* 4314 * vm_map_entry_reserve() implements an important mitigation 4315 * against mmap() span running the kernel out of vm_map_entry 4316 * structures, but it can also cause an infinite call recursion. 4317 * Use td_nest_count to prevent an infinite recursion (allows 4318 * the vm_map code to dig into the pcpu vm_map_entry reserve). 4319 */ 4320 count = 0; 4321 if (td->td_nest_count == 0) { 4322 ++td->td_nest_count; 4323 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 4324 --td->td_nest_count; 4325 } 4326 RetryLookup: 4327 if (use_read_lock) 4328 vm_map_lock_read(map); 4329 else 4330 vm_map_lock(map); 4331 4332 /* 4333 * Always do a full lookup. The hint doesn't get us much anymore 4334 * now that the map is RB'd. 4335 */ 4336 cpu_ccfence(); 4337 *out_entry = NULL; 4338 *bap = NULL; 4339 4340 { 4341 vm_map_entry_t tmp_entry; 4342 4343 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { 4344 rv = KERN_INVALID_ADDRESS; 4345 goto done; 4346 } 4347 entry = tmp_entry; 4348 *out_entry = entry; 4349 } 4350 4351 /* 4352 * Handle submaps. 
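 * When the looked-up entry is a submap we restart the lookup inside
 * the sub-map.  *var_map is updated as well, so the caller's eventual
 * vm_map_lookup_done() unlocks the map that was actually left locked.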
4353 */ 4354 if (entry->maptype == VM_MAPTYPE_SUBMAP) { 4355 vm_map_t old_map = map; 4356 4357 *var_map = map = entry->ba.sub_map; 4358 if (use_read_lock) 4359 vm_map_unlock_read(old_map); 4360 else 4361 vm_map_unlock(old_map); 4362 use_read_lock = 1; 4363 goto RetryLookup; 4364 } 4365 4366 /* 4367 * Check whether this task is allowed to have this page. 4368 * Note the special case for MAP_ENTRY_COW pages with an override. 4369 * This is to implement a forced COW for debuggers. 4370 */ 4371 if (fault_type & VM_PROT_OVERRIDE_WRITE) 4372 prot = entry->max_protection; 4373 else 4374 prot = entry->protection; 4375 4376 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 4377 if ((fault_type & prot) != fault_type) { 4378 rv = KERN_PROTECTION_FAILURE; 4379 goto done; 4380 } 4381 4382 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 4383 (entry->eflags & MAP_ENTRY_COW) && 4384 (fault_type & VM_PROT_WRITE) && 4385 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 4386 rv = KERN_PROTECTION_FAILURE; 4387 goto done; 4388 } 4389 4390 /* 4391 * If this page is not pageable, we have to get it for all possible 4392 * accesses. 4393 */ 4394 *wflags = 0; 4395 if (entry->wired_count) { 4396 *wflags |= FW_WIRED; 4397 prot = fault_type = entry->protection; 4398 } 4399 4400 /* 4401 * Virtual page tables may need to update the accessed (A) bit 4402 * in a page table entry. Upgrade the fault to a write fault for 4403 * that case if the map will support it. If the map does not support 4404 * it the page table entry simply will not be updated. 4405 */ 4406 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 4407 if (prot & VM_PROT_WRITE) 4408 fault_type |= VM_PROT_WRITE; 4409 } 4410 4411 if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace && 4412 pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) { 4413 if ((prot & VM_PROT_WRITE) == 0) 4414 fault_type |= VM_PROT_WRITE; 4415 } 4416 4417 /* 4418 * Only NORMAL and VPAGETABLE maps are object-based. UKSMAPs are not. 4419 */ 4420 if (entry->maptype != VM_MAPTYPE_NORMAL && 4421 entry->maptype != VM_MAPTYPE_VPAGETABLE) { 4422 *bap = NULL; 4423 goto skip; 4424 } 4425 4426 /* 4427 * If the entry was copy-on-write, we either ... 4428 */ 4429 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4430 /* 4431 * If we want to write the page, we may as well handle that 4432 * now since we've got the map locked. 4433 * 4434 * If we don't need to write the page, we just demote the 4435 * permissions allowed. 4436 */ 4437 if (fault_type & VM_PROT_WRITE) { 4438 /* 4439 * Not allowed if TDF_NOFAULT is set as the shadowing 4440 * operation can deadlock against the faulting 4441 * function due to the copy-on-write. 4442 */ 4443 if (curthread->td_flags & TDF_NOFAULT) { 4444 rv = KERN_FAILURE_NOFAULT; 4445 goto done; 4446 } 4447 4448 /* 4449 * Make a new vm_map_backing + object, and place it 4450 * in the object chain. Note that no new references 4451 * have appeared -- one just moved from the map to 4452 * the new object. 4453 */ 4454 if (use_read_lock && vm_map_lock_upgrade(map)) { 4455 /* lost lock */ 4456 use_read_lock = 0; 4457 goto RetryLookup; 4458 } 4459 use_read_lock = 0; 4460 vm_map_entry_shadow(entry); 4461 *wflags |= FW_DIDCOW; 4462 } else { 4463 /* 4464 * We're attempting to read a copy-on-write page -- 4465 * don't allow writes. 4466 */ 4467 prot &= ~VM_PROT_WRITE; 4468 } 4469 } 4470 4471 /* 4472 * Create an object if necessary. This code also handles 4473 * partitioning large entries to improve vm_fault performance. 
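 * Partitioning clips the entry at partition boundaries around vaddr
 * (see vm_map_entry_partition() below); the resulting smaller entry
 * then gets its own VM object from vm_map_entry_allocate_object().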
4474 */ 4475 if (entry->ba.object == NULL && !map->system_map) { 4476 if (use_read_lock && vm_map_lock_upgrade(map)) { 4477 /* lost lock */ 4478 use_read_lock = 0; 4479 goto RetryLookup; 4480 } 4481 use_read_lock = 0; 4482 4483 /* 4484 * Partition large entries, giving each its own VM object, 4485 * to improve concurrent fault performance. This is only 4486 * applicable to userspace. 4487 */ 4488 if (map != &kernel_map && 4489 entry->maptype == VM_MAPTYPE_NORMAL && 4490 ((entry->ba.start ^ entry->ba.end) & 4491 ~MAP_ENTRY_PARTITION_MASK) && 4492 vm_map_partition_enable) { 4493 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 4494 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4495 ++mycpu->gd_cnt.v_intrans_coll; 4496 ++mycpu->gd_cnt.v_intrans_wait; 4497 vm_map_transition_wait(map, 0); 4498 goto RetryLookup; 4499 } 4500 vm_map_entry_partition(map, entry, vaddr, &count); 4501 } 4502 vm_map_entry_allocate_object(entry); 4503 } 4504 4505 /* 4506 * Return the object/offset from this entry. If the entry was 4507 * copy-on-write or empty, it has been fixed up. 4508 */ 4509 *bap = &entry->ba; 4510 4511 skip: 4512 *pindex = OFF_TO_IDX((vaddr - entry->ba.start) + entry->ba.offset); 4513 4514 /* 4515 * Return whether this is the only map sharing this data. On 4516 * success we return with a read lock held on the map. On failure 4517 * we return with the map unlocked. 4518 */ 4519 *out_prot = prot; 4520 done: 4521 if (rv == KERN_SUCCESS) { 4522 if (use_read_lock == 0) 4523 vm_map_lock_downgrade(map); 4524 } else if (use_read_lock) { 4525 vm_map_unlock_read(map); 4526 } else { 4527 vm_map_unlock(map); 4528 } 4529 if (count > 0) 4530 vm_map_entry_release(count); 4531 4532 return (rv); 4533 } 4534 4535 /* 4536 * Releases locks acquired by a vm_map_lookup() 4537 * (according to the handle returned by that lookup). 4538 * 4539 * No other requirements. 4540 */ 4541 void 4542 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count) 4543 { 4544 /* 4545 * Unlock the main-level map 4546 */ 4547 vm_map_unlock_read(map); 4548 if (count) 4549 vm_map_entry_release(count); 4550 } 4551 4552 static void 4553 vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry, 4554 vm_offset_t vaddr, int *countp) 4555 { 4556 vaddr &= ~MAP_ENTRY_PARTITION_MASK; 4557 vm_map_clip_start(map, entry, vaddr, countp); 4558 vaddr += MAP_ENTRY_PARTITION_SIZE; 4559 vm_map_clip_end(map, entry, vaddr, countp); 4560 } 4561 4562 /* 4563 * Quick hack, needs some help to make it more SMP friendly. 
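 *
 * Illustrative pairing (a sketch only, not a verbatim in-tree caller);
 * establishing an interlock over a range blocks until no other
 * interlock overlaps that range, and vm_map_deinterlock() wakes any
 * waiters:
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the range [start, end) ...
 *	vm_map_deinterlock(map, &ilock);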
4564 */ 4565 void 4566 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock, 4567 vm_offset_t ran_beg, vm_offset_t ran_end) 4568 { 4569 struct vm_map_ilock *scan; 4570 4571 ilock->ran_beg = ran_beg; 4572 ilock->ran_end = ran_end; 4573 ilock->flags = 0; 4574 4575 spin_lock(&map->ilock_spin); 4576 restart: 4577 for (scan = map->ilock_base; scan; scan = scan->next) { 4578 if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) { 4579 scan->flags |= ILOCK_WAITING; 4580 ssleep(scan, &map->ilock_spin, 0, "ilock", 0); 4581 goto restart; 4582 } 4583 } 4584 ilock->next = map->ilock_base; 4585 map->ilock_base = ilock; 4586 spin_unlock(&map->ilock_spin); 4587 } 4588 4589 void 4590 vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock) 4591 { 4592 struct vm_map_ilock *scan; 4593 struct vm_map_ilock **scanp; 4594 4595 spin_lock(&map->ilock_spin); 4596 scanp = &map->ilock_base; 4597 while ((scan = *scanp) != NULL) { 4598 if (scan == ilock) { 4599 *scanp = ilock->next; 4600 spin_unlock(&map->ilock_spin); 4601 if (ilock->flags & ILOCK_WAITING) 4602 wakeup(ilock); 4603 return; 4604 } 4605 scanp = &scan->next; 4606 } 4607 spin_unlock(&map->ilock_spin); 4608 panic("vm_map_deinterlock: missing ilock!"); 4609 } 4610 4611 #include "opt_ddb.h" 4612 #ifdef DDB 4613 #include <ddb/ddb.h> 4614 4615 /* 4616 * Debugging only 4617 */ 4618 DB_SHOW_COMMAND(map, vm_map_print) 4619 { 4620 static int nlines; 4621 /* XXX convert args. */ 4622 vm_map_t map = (vm_map_t)addr; 4623 boolean_t full = have_addr; 4624 4625 vm_map_entry_t entry; 4626 4627 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4628 (void *)map, 4629 (void *)map->pmap, map->nentries, map->timestamp); 4630 nlines++; 4631 4632 if (!full && db_indent) 4633 return; 4634 4635 db_indent += 2; 4636 RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) { 4637 db_iprintf("map entry %p: start=%p, end=%p\n", 4638 (void *)entry, 4639 (void *)entry->ba.start, (void *)entry->ba.end); 4640 nlines++; 4641 { 4642 static char *inheritance_name[4] = 4643 {"share", "copy", "none", "donate_copy"}; 4644 4645 db_iprintf(" prot=%x/%x/%s", 4646 entry->protection, 4647 entry->max_protection, 4648 inheritance_name[(int)(unsigned char) 4649 entry->inheritance]); 4650 if (entry->wired_count != 0) 4651 db_printf(", wired"); 4652 } 4653 switch(entry->maptype) { 4654 case VM_MAPTYPE_SUBMAP: 4655 /* XXX no %qd in kernel. Truncate entry->ba.offset. */ 4656 db_printf(", share=%p, offset=0x%lx\n", 4657 (void *)entry->ba.sub_map, 4658 (long)entry->ba.offset); 4659 nlines++; 4660 4661 db_indent += 2; 4662 vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map, 4663 full, 0, NULL); 4664 db_indent -= 2; 4665 break; 4666 case VM_MAPTYPE_NORMAL: 4667 case VM_MAPTYPE_VPAGETABLE: 4668 /* XXX no %qd in kernel. Truncate entry->ba.offset. */ 4669 db_printf(", object=%p, offset=0x%lx", 4670 (void *)entry->ba.object, 4671 (long)entry->ba.offset); 4672 if (entry->eflags & MAP_ENTRY_COW) 4673 db_printf(", copy (%s)", 4674 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4675 db_printf("\n"); 4676 nlines++; 4677 4678 if (entry->ba.object) { 4679 db_indent += 2; 4680 vm_object_print((db_expr_t)(intptr_t) 4681 entry->ba.object, 4682 full, 0, NULL); 4683 nlines += 4; 4684 db_indent -= 2; 4685 } 4686 break; 4687 case VM_MAPTYPE_UKSMAP: 4688 db_printf(", uksmap=%p, offset=0x%lx", 4689 (void *)entry->ba.uksmap, 4690 (long)entry->ba.offset); 4691 if (entry->eflags & MAP_ENTRY_COW) 4692 db_printf(", copy (%s)", 4693 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? 
"needed" : "done"); 4694 db_printf("\n"); 4695 nlines++; 4696 break; 4697 default: 4698 break; 4699 } 4700 } 4701 db_indent -= 2; 4702 if (db_indent == 0) 4703 nlines = 0; 4704 } 4705 4706 /* 4707 * Debugging only 4708 */ 4709 DB_SHOW_COMMAND(procvm, procvm) 4710 { 4711 struct proc *p; 4712 4713 if (have_addr) { 4714 p = (struct proc *) addr; 4715 } else { 4716 p = curproc; 4717 } 4718 4719 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4720 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4721 (void *)vmspace_pmap(p->p_vmspace)); 4722 4723 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 4724 } 4725 4726 #endif /* DDB */ 4727