1 /* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2003-2019 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * This code is derived from software contributed to The DragonFly Project 10 * by Matthew Dillon <dillon@backplane.com> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 37 * 38 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 39 * All rights reserved. 40 * 41 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/kern_syscall.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remained compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
static void vmspace_dtor(void *obj, void *privdata);
static void vmspace_terminate(struct vmspace *vm, int final);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry");
static struct objcache *vmspace_cache;

/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define MAPENTRYBSP_CACHE	(MAXCPU+1)
#define MAPENTRYAP_CACHE	8

/*
 * Partitioning threaded programs with large anonymous memory areas can
 * improve concurrent fault performance.
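 *
 * Illustrative example (added for clarity, not from the original source):
 * with the 32MB partition size defined below, an entry spanning
 * [0x62000000, 0x63000000) satisfies VM_MAP_ENTRY_WITHIN_PARTITION()
 * because (start ^ end) & ~MAP_ENTRY_PARTITION_MASK == 0, while an entry
 * spanning [0x63f00000, 0x64100000) crosses a 32MB boundary and does not.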
133 */ 134 #define MAP_ENTRY_PARTITION_SIZE ((vm_offset_t)(32 * 1024 * 1024)) 135 #define MAP_ENTRY_PARTITION_MASK (MAP_ENTRY_PARTITION_SIZE - 1) 136 137 #define VM_MAP_ENTRY_WITHIN_PARTITION(entry) \ 138 ((((entry)->start ^ (entry)->end) & ~MAP_ENTRY_PARTITION_MASK) == 0) 139 140 static struct vm_zone mapentzone_store; 141 static vm_zone_t mapentzone; 142 143 static struct vm_map_entry map_entry_init[MAX_MAPENT]; 144 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE]; 145 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE]; 146 147 static int randomize_mmap; 148 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0, 149 "Randomize mmap offsets"); 150 static int vm_map_relock_enable = 1; 151 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW, 152 &vm_map_relock_enable, 0, "insert pop pgtable optimization"); 153 static int vm_map_partition_enable = 1; 154 SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW, 155 &vm_map_partition_enable, 0, "Break up larger vm_map_entry's"); 156 static int vm_map_backing_limit = 5; 157 SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW, 158 &vm_map_backing_limit, 0, "ba.backing_ba link depth"); 159 static int vm_map_backing_shadow_test = 1; 160 SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW, 161 &vm_map_backing_shadow_test, 0, "ba.object shadow test"); 162 163 static void vmspace_drop_notoken(struct vmspace *vm); 164 static void vm_map_entry_shadow(vm_map_entry_t entry); 165 static vm_map_entry_t vm_map_entry_create(int *); 166 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *); 167 static void vm_map_entry_dispose_ba (vm_map_backing_t ba); 168 static void vm_map_backing_replicated(vm_map_t map, 169 vm_map_entry_t entry, int flags); 170 static void vm_map_backing_attach (vm_map_backing_t ba); 171 static void vm_map_backing_detach (vm_map_backing_t ba); 172 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 173 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 174 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *); 175 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t); 176 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t, 177 vm_map_entry_t); 178 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, 179 vm_offset_t start, vm_offset_t end, int *countp, int flags); 180 static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry, 181 vm_offset_t vaddr, int *countp); 182 183 #define MAP_BACK_CLIPPED 0x0001 184 #define MAP_BACK_BASEOBJREFD 0x0002 185 186 /* 187 * Initialize the vm_map module. Must be called before any other vm_map 188 * routines. 189 * 190 * Map and entry structures are allocated from the general purpose 191 * memory pool with some exceptions: 192 * 193 * - The kernel map is allocated statically. 194 * - Initial kernel map entries are allocated out of a static pool. 195 * - We must set ZONE_SPECIAL here or the early boot code can get 196 * stuck if there are >63 cores. 197 * 198 * These restrictions are necessary since malloc() uses the 199 * maps and requires map entries. 200 * 201 * Called from the low level boot code only. 
 */
void
vm_map_startup(void)
{
        mapentzone = &mapentzone_store;
        zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
                  map_entry_init, MAX_MAPENT);
        mapentzone_store.zflags |= ZONE_SPECIAL;
}

/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
        vmspace_cache = objcache_create_mbacked(M_VMSPACE,
                                                sizeof(struct vmspace),
                                                0, ncpus * 4,
                                                vmspace_ctor, vmspace_dtor,
                                                NULL);
        zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
        pmap_init2();
        vm_object_init2();
}

/*
 * objcache support.  We leave the pmap root cached as long as possible
 * for performance reasons.
 */
static
boolean_t
vmspace_ctor(void *obj, void *privdata, int ocflags)
{
        struct vmspace *vm = obj;

        bzero(vm, sizeof(*vm));
        vm->vm_refcnt = VM_REF_DELETED;

        return 1;
}

static
void
vmspace_dtor(void *obj, void *privdata)
{
        struct vmspace *vm = obj;

        KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
        pmap_puninit(vmspace_pmap(vm));
}

/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->start is address, and the only field which must be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
        if (a->start < b->start)
                return(-1);
        else if (a->start > b->start)
                return(1);
        return(0);
}

/*
 * Initialize vmspace ref/hold counts for vmspace0.  There is a holdcnt
 * for every refcnt.
 */
void
vmspace_initrefs(struct vmspace *vm)
{
        vm->vm_refcnt = 1;
        vm->vm_holdcnt = 1;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * Returns a referenced vmspace.
 *
 * No requirements.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
        struct vmspace *vm;

        vm = objcache_get(vmspace_cache, M_WAITOK);

        bzero(&vm->vm_startcopy,
              (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
        vm_map_init(&vm->vm_map, min, max, NULL);   /* initializes token */

        /*
         * NOTE: The hold acquires the token for safety.
         *
         * On return vmspace is referenced (refs=1, hold=1).  That is,
         * each refcnt also has a holdcnt.  There can be additional holds
         * (holdcnt) above and beyond the refcnt.  Finalization is handled
         * in two stages, one on refs 1->0, and the second on hold 1->0.
         */
        KKASSERT(vm->vm_holdcnt == 0);
        KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
        vmspace_initrefs(vm);
        vmspace_hold(vm);
        pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
        vm->vm_map.pmap = vmspace_pmap(vm);     /* XXX */
        vm->vm_shm = NULL;
        vm->vm_flags = 0;
        cpu_vmspace_alloc(vm);
        vmspace_drop(vm);

        return (vm);
}

/*
 * NOTE: Can return a negative value if the vmspace is exiting (the code
 *       below reports the VM_REF_DELETED state as -1).
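 *
 * Illustrative note (added): a hypothetical caller can therefore treat
 * any negative return, e.g. (vmspace_getrefs(vm) < 0), as meaning the
 * vmspace is being torn down.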
 */
int
vmspace_getrefs(struct vmspace *vm)
{
        int32_t n;

        n = vm->vm_refcnt;
        cpu_ccfence();
        if (n & VM_REF_DELETED)
                n = -1;
        return n;
}

void
vmspace_hold(struct vmspace *vm)
{
        atomic_add_int(&vm->vm_holdcnt, 1);
        lwkt_gettoken(&vm->vm_map.token);
}

/*
 * Drop with final termination interlock.
 */
void
vmspace_drop(struct vmspace *vm)
{
        lwkt_reltoken(&vm->vm_map.token);
        vmspace_drop_notoken(vm);
}

static void
vmspace_drop_notoken(struct vmspace *vm)
{
        if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
                if (vm->vm_refcnt & VM_REF_DELETED)
                        vmspace_terminate(vm, 1);
        }
}

/*
 * A vmspace object must not be in a terminated state to be able to obtain
 * additional refs on it.
 *
 * These are official references to the vmspace, the count is used to check
 * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
 *
 * XXX we need to combine hold & ref together into one 64-bit field to allow
 * holds to prevent stage-1 termination.
 */
void
vmspace_ref(struct vmspace *vm)
{
        uint32_t n;

        atomic_add_int(&vm->vm_holdcnt, 1);
        n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
        KKASSERT((n & VM_REF_DELETED) == 0);
}

/*
 * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
 * termination of the vmspace.  Then, on the final drop of the hold we
 * will do stage-2 final termination.
 */
void
vmspace_rel(struct vmspace *vm)
{
        uint32_t n;

        /*
         * Drop refs.  Each ref also has a hold which is also dropped.
         *
         * When refs hits 0, compete to get the VM_REF_DELETED flag (the
         * hold prevents finalization) to start termination processing.
         * Finalization occurs when the last hold count drops to 0.
         */
        n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
        while (n == 0) {
                if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
                        vmspace_terminate(vm, 0);
                        break;
                }
                n = vm->vm_refcnt;
                cpu_ccfence();
        }
        vmspace_drop_notoken(vm);
}

/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been reaped.
 *
 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
 * to prevent stage-2 until the process is reaped.  Note the order of
 * operations: we must hold first.
 *
 * No requirements.
 */
void
vmspace_relexit(struct vmspace *vm)
{
        atomic_add_int(&vm->vm_holdcnt, 1);
        vmspace_rel(vm);
}

/*
 * Called during reap to disconnect the remainder of the vmspace from
 * the process.  On the hold drop the vmspace termination is finalized.
 *
 * No requirements.
 */
void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        vmspace_drop_notoken(vm);
}

/*
 * Called in two cases:
 *
 * (1) When the last refcnt is dropped and the vmspace becomes inactive,
 *     called with final == 0.  refcnt will be (u_int)-1 at this point,
 *     and holdcnt will still be non-zero.
 *
 * (2) When holdcnt becomes 0, called with final == 1.  There should no
 *     longer be anyone with access to the vmspace.
 *
 * VMSPACE_EXIT1 flags the primary deactivation
 * VMSPACE_EXIT2 flags the last reap
 */
static void
vmspace_terminate(struct vmspace *vm, int final)
{
        int count;

        lwkt_gettoken(&vm->vm_map.token);
        if (final == 0) {
                KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
                vm->vm_flags |= VMSPACE_EXIT1;

                /*
                 * Get rid of most of the resources.  Leave the kernel pmap
                 * intact.
                 *
                 * If the pmap does not contain wired pages we can bulk-delete
                 * the pmap as a performance optimization before removing the
                 * related mappings.
                 *
                 * If the pmap contains wired pages we cannot do this
                 * pre-optimization because currently vm_fault_unwire()
                 * expects the pmap pages to exist and will not decrement
                 * p->wire_count if they do not.
                 */
                shmexit(vm);
                if (vmspace_pmap(vm)->pm_stats.wired_count) {
                        vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
                                      VM_MAX_USER_ADDRESS);
                        pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
                                          VM_MAX_USER_ADDRESS);
                } else {
                        pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
                                          VM_MAX_USER_ADDRESS);
                        vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
                                      VM_MAX_USER_ADDRESS);
                }
                lwkt_reltoken(&vm->vm_map.token);
        } else {
                KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
                KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);

                /*
                 * Get rid of remaining basic resources.
                 */
                vm->vm_flags |= VMSPACE_EXIT2;
                shmexit(vm);

                count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
                vm_map_lock(&vm->vm_map);
                cpu_vmspace_free(vm);

                /*
                 * Lock the map, to wait out all other references to it.
                 * Delete all of the mappings and pages they hold, then call
                 * the pmap module to reclaim anything left.
                 */
                vm_map_delete(&vm->vm_map,
                              vm_map_min(&vm->vm_map),
                              vm_map_max(&vm->vm_map),
                              &count);
                vm_map_unlock(&vm->vm_map);
                vm_map_entry_release(count);

                pmap_release(vmspace_pmap(vm));
                lwkt_reltoken(&vm->vm_map.token);
                objcache_put(vmspace_cache, vm);
        }
}

/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_swap_count(struct vmspace *vm)
{
        vm_map_t map = &vm->vm_map;
        vm_map_entry_t cur;
        vm_object_t object;
        vm_offset_t count = 0;
        vm_offset_t n;

        vmspace_hold(vm);

        RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
                switch(cur->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        if ((object = cur->ba.object) == NULL)
                                break;
                        if (object->swblock_count) {
                                n = (cur->end - cur->start) / PAGE_SIZE;
                                count += object->swblock_count *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                        break;
                default:
                        break;
                }
        }
        vmspace_drop(vm);

        return(count);
}

/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 *
 * No requirements.
580 */ 581 vm_offset_t 582 vmspace_anonymous_count(struct vmspace *vm) 583 { 584 vm_map_t map = &vm->vm_map; 585 vm_map_entry_t cur; 586 vm_object_t object; 587 vm_offset_t count = 0; 588 589 vmspace_hold(vm); 590 RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) { 591 switch(cur->maptype) { 592 case VM_MAPTYPE_NORMAL: 593 case VM_MAPTYPE_VPAGETABLE: 594 if ((object = cur->ba.object) == NULL) 595 break; 596 if (object->type != OBJT_DEFAULT && 597 object->type != OBJT_SWAP) { 598 break; 599 } 600 count += object->resident_page_count; 601 break; 602 default: 603 break; 604 } 605 } 606 vmspace_drop(vm); 607 608 return(count); 609 } 610 611 /* 612 * Initialize an existing vm_map structure such as that in the vmspace 613 * structure. The pmap is initialized elsewhere. 614 * 615 * No requirements. 616 */ 617 void 618 vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr, 619 pmap_t pmap) 620 { 621 RB_INIT(&map->rb_root); 622 spin_init(&map->ilock_spin, "ilock"); 623 map->ilock_base = NULL; 624 map->nentries = 0; 625 map->size = 0; 626 map->system_map = 0; 627 vm_map_min(map) = min_addr; 628 vm_map_max(map) = max_addr; 629 map->pmap = pmap; 630 map->timestamp = 0; 631 map->flags = 0; 632 bzero(&map->freehint, sizeof(map->freehint)); 633 lwkt_token_init(&map->token, "vm_map"); 634 lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0); 635 } 636 637 /* 638 * Find the first possible free address for the specified request length. 639 * Returns 0 if we don't have one cached. 640 */ 641 static 642 vm_offset_t 643 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align) 644 { 645 vm_map_freehint_t *scan; 646 647 scan = &map->freehint[0]; 648 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 649 if (scan->length == length && scan->align == align) 650 return(scan->start); 651 ++scan; 652 } 653 return 0; 654 } 655 656 /* 657 * Unconditionally set the freehint. Called by vm_map_findspace() after 658 * it finds an address. This will help us iterate optimally on the next 659 * similar findspace. 660 */ 661 static 662 void 663 vm_map_freehint_update(vm_map_t map, vm_offset_t start, 664 vm_size_t length, vm_size_t align) 665 { 666 vm_map_freehint_t *scan; 667 668 scan = &map->freehint[0]; 669 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 670 if (scan->length == length && scan->align == align) { 671 scan->start = start; 672 return; 673 } 674 ++scan; 675 } 676 scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK]; 677 scan->start = start; 678 scan->align = align; 679 scan->length = length; 680 ++map->freehint_newindex; 681 } 682 683 /* 684 * Update any existing freehints (for any alignment), for the hole we just 685 * added. 686 */ 687 static 688 void 689 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length) 690 { 691 vm_map_freehint_t *scan; 692 693 scan = &map->freehint[0]; 694 while (scan < &map->freehint[VM_MAP_FFCOUNT]) { 695 if (scan->length <= length && scan->start > start) 696 scan->start = start; 697 ++scan; 698 } 699 } 700 701 /* 702 * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting 703 * object in the entry for COW faults. 704 * 705 * The entire chain including entry->ba (prior to inserting the fronting 706 * object) essentially becomes set in stone... elements of it can be paged 707 * in or out, but cannot be further modified. 708 * 709 * NOTE: If we do not optimize the backing chain then a unique copy is not 710 * needed. 
Note, however, that because portions of the chain are 711 * shared across pmaps we cannot make any changes to the vm_map_backing 712 * elements themselves. 713 * 714 * If the map segment is governed by a virtual page table then it is 715 * possible to address offsets beyond the mapped area. Just allocate 716 * a maximally sized object for this case. 717 * 718 * If addref is non-zero an additional reference is added to the returned 719 * entry. This mechanic exists because the additional reference might have 720 * to be added atomically and not after return to prevent a premature 721 * collapse. XXX currently there is no collapse code. 722 * 723 * The vm_map must be exclusively locked. 724 * No other requirements. 725 */ 726 static 727 void 728 vm_map_entry_shadow(vm_map_entry_t entry) 729 { 730 vm_map_backing_t ba; 731 vm_size_t length; 732 vm_object_t source; 733 vm_object_t result; 734 735 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) 736 length = 0x7FFFFFFF; 737 else 738 length = atop(entry->end - entry->start); 739 ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */ 740 741 /* 742 * Don't create the new object if the old object isn't shared. 743 * 744 * Caller ensures source exists (all backing_ba's must have objects), 745 * typically indirectly by virtue of the NEEDS_COPY flag being set. 746 * 747 * WARNING! Checking ref_count == 1 only works because we are testing 748 * the object embedded in the entry (entry->ba.object). 749 * This test DOES NOT WORK if checking an object hanging off 750 * the backing chain (entry->ba.backing_ba list) because the 751 * vm_map_backing might be shared, or part of a chain that 752 * is shared. Checking ba->refs is worthless. 753 * 754 * XXX since we now replicate vm_map_backing's, ref_count==1 755 * actually works generally for non-vnodes. 756 */ 757 source = entry->ba.object; 758 KKASSERT(source); 759 vm_object_hold(source); 760 761 if (source->type != OBJT_VNODE) { 762 if (source->ref_count == 1 && 763 source->handle == NULL && 764 (source->type == OBJT_DEFAULT || 765 source->type == OBJT_SWAP)) { 766 vm_object_drop(source); 767 kfree(ba, M_MAP_BACKING); 768 goto done; 769 } 770 } 771 772 /* 773 * Once it becomes part of a backing_ba chain it can wind up anywhere, 774 * drop the ONEMAPPING flag now. 775 */ 776 vm_object_clear_flag(source, OBJ_ONEMAPPING); 777 778 /* 779 * Allocate a new object with the given length. The new object 780 * is returned referenced but we may have to add another one. 781 * If we are adding a second reference we must clear OBJ_ONEMAPPING. 782 * (typically because the caller is about to clone a vm_map_entry). 783 * 784 * The source object currently has an extra reference to prevent 785 * collapses into it while we mess with its shadow list, which 786 * we will remove later in this routine. 787 * 788 * The target object may require a second reference if asked for one 789 * by the caller. 790 */ 791 result = vm_object_allocate_hold(OBJT_DEFAULT, length); 792 if (result == NULL) 793 panic("vm_object_shadow: no object for shadowing"); 794 795 /* 796 * The new object shadows the source object. 797 * 798 * Try to optimize the result object's page color when shadowing 799 * in order to maintain page coloring consistency in the combined 800 * shadowed object. 801 * 802 * The source object is moved to ba, retaining its existing ref-count. 803 * No additional ref is needed. 
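 *
 * Illustrative before/after sketch of the chain manipulation performed
 * below (added for clarity):
 *
 *	before:	entry->ba = { object = source, backing_ba = <old chain> }
 *	after:	entry->ba = { object = result, backing_ba = ba }
 *		ba        = { object = source, backing_ba = <old chain> }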
 *
 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
 */
        vm_map_backing_detach(&entry->ba);
        *ba = entry->ba;                /* previous ba */
        ba->refs = 1;                   /* initialize ref count */
        entry->ba.object = result;      /* new ba (at head of entry) */
        entry->ba.backing_ba = ba;
        entry->ba.backing_count = ba->backing_count + 1;
        entry->ba.offset = 0;
        entry->ba.refs = 0;

        /* cpu localization twist */
        result->pg_color = vm_quickcolor();

        vm_map_backing_attach(&entry->ba);
        vm_map_backing_attach(ba);

        /*
         * Adjust the return storage.  Drop the ref on source before
         * returning.
         */
        vm_object_drop(result);
        vm_object_drop(source);
done:
        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}

/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
        vm_object_t obj;

        /*
         * ba.offset is added cumulatively in the backing_ba scan, so we
         * can only reset it to zero if ba.backing_ba is NULL.  We reset
         * it to 0 only for debugging convenience.
         *
         * ba.offset cannot otherwise be modified because it affects
         * the offsets for the entire backing_ba chain.
         */
        if (entry->ba.backing_ba == NULL)
                entry->ba.offset = 0;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                /* XXX */
                obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);
        } else {
                obj = vm_object_allocate(OBJT_DEFAULT,
                                         atop(entry->end - entry->start) +
                                         entry->ba.offset);
        }
        entry->ba.object = obj;
        vm_map_backing_attach(&entry->ba);
}

/*
 * Set an initial negative count so the first attempt to reserve
 * space preloads a bunch of vm_map_entry's for this cpu.  Also
 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 * map a new page for vm_map_entry structures.  SMP systems are
 * particularly sensitive.
 *
 * This routine is called in early boot so we cannot just call
 * vm_map_entry_reserve().
 *
 * Called from the low level boot code only (for each cpu)
 *
 * WARNING! Take care not to have too-big a static/BSS structure here
 *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
 *	    can get blown out by the kernel plus the initrd image.
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
        vm_map_entry_t entry;
        int count;
        int i;

        atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2);
        if (gd->gd_cpuid == 0) {
                entry = &cpu_map_entry_init_bsp[0];
                count = MAPENTRYBSP_CACHE;
        } else {
                entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
                count = MAPENTRYAP_CACHE;
        }
        for (i = 0; i < count; ++i, ++entry) {
                MAPENT_FREELIST(entry) = gd->gd_vme_base;
                gd->gd_vme_base = entry;
        }
}

/*
 * Reserves vm_map_entry structures so code later-on can manipulate
 * map_entry structures within a locked map without blocking trying
 * to allocate a new vm_map_entry.
 *
 * No requirements.
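 *
 * Illustrative caller-side sketch (added; it mirrors the pattern used
 * elsewhere in this file, e.g. vm_map_submap() and vmspace_terminate()):
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip / create / dispose entries, passing &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);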
917 * 918 * WARNING! We must not decrement gd_vme_avail until after we have 919 * ensured that sufficient entries exist, otherwise we can 920 * get into an endless call recursion in the zalloc code 921 * itself. 922 */ 923 int 924 vm_map_entry_reserve(int count) 925 { 926 struct globaldata *gd = mycpu; 927 vm_map_entry_t entry; 928 929 /* 930 * Make sure we have enough structures in gd_vme_base to handle 931 * the reservation request. 932 * 933 * Use a critical section to protect against VM faults. It might 934 * not be needed, but we have to be careful here. 935 */ 936 if (gd->gd_vme_avail < count) { 937 crit_enter(); 938 while (gd->gd_vme_avail < count) { 939 entry = zalloc(mapentzone); 940 MAPENT_FREELIST(entry) = gd->gd_vme_base; 941 gd->gd_vme_base = entry; 942 atomic_add_int(&gd->gd_vme_avail, 1); 943 } 944 crit_exit(); 945 } 946 atomic_add_int(&gd->gd_vme_avail, -count); 947 948 return(count); 949 } 950 951 /* 952 * Releases previously reserved vm_map_entry structures that were not 953 * used. If we have too much junk in our per-cpu cache clean some of 954 * it out. 955 * 956 * No requirements. 957 */ 958 void 959 vm_map_entry_release(int count) 960 { 961 struct globaldata *gd = mycpu; 962 vm_map_entry_t entry; 963 vm_map_entry_t efree; 964 965 count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count; 966 if (gd->gd_vme_avail > MAP_RESERVE_SLOP) { 967 efree = NULL; 968 crit_enter(); 969 while (gd->gd_vme_avail > MAP_RESERVE_HYST) { 970 entry = gd->gd_vme_base; 971 KKASSERT(entry != NULL); 972 gd->gd_vme_base = MAPENT_FREELIST(entry); 973 atomic_add_int(&gd->gd_vme_avail, -1); 974 MAPENT_FREELIST(entry) = efree; 975 efree = entry; 976 } 977 crit_exit(); 978 while ((entry = efree) != NULL) { 979 efree = MAPENT_FREELIST(efree); 980 zfree(mapentzone, entry); 981 } 982 } 983 } 984 985 /* 986 * Reserve map entry structures for use in kernel_map itself. These 987 * entries have *ALREADY* been reserved on a per-cpu basis when the map 988 * was inited. This function is used by zalloc() to avoid a recursion 989 * when zalloc() itself needs to allocate additional kernel memory. 990 * 991 * This function works like the normal reserve but does not load the 992 * vm_map_entry cache (because that would result in an infinite 993 * recursion). Note that gd_vme_avail may go negative. This is expected. 994 * 995 * Any caller of this function must be sure to renormalize after 996 * potentially eating entries to ensure that the reserve supply 997 * remains intact. 998 * 999 * No requirements. 1000 */ 1001 int 1002 vm_map_entry_kreserve(int count) 1003 { 1004 struct globaldata *gd = mycpu; 1005 1006 atomic_add_int(&gd->gd_vme_avail, -count); 1007 KASSERT(gd->gd_vme_base != NULL, 1008 ("no reserved entries left, gd_vme_avail = %d", 1009 gd->gd_vme_avail)); 1010 return(count); 1011 } 1012 1013 /* 1014 * Release previously reserved map entries for kernel_map. We do not 1015 * attempt to clean up like the normal release function as this would 1016 * cause an unnecessary (but probably not fatal) deep procedure call. 1017 * 1018 * No requirements. 1019 */ 1020 void 1021 vm_map_entry_krelease(int count) 1022 { 1023 struct globaldata *gd = mycpu; 1024 1025 atomic_add_int(&gd->gd_vme_avail, count); 1026 } 1027 1028 /* 1029 * Allocates a VM map entry for insertion. No entry fields are filled in. 1030 * 1031 * The entries should have previously been reserved. The reservation count 1032 * is tracked in (*countp). 1033 * 1034 * No requirements. 
1035 */ 1036 static vm_map_entry_t 1037 vm_map_entry_create(int *countp) 1038 { 1039 struct globaldata *gd = mycpu; 1040 vm_map_entry_t entry; 1041 1042 KKASSERT(*countp > 0); 1043 --*countp; 1044 crit_enter(); 1045 entry = gd->gd_vme_base; 1046 KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp)); 1047 gd->gd_vme_base = MAPENT_FREELIST(entry); 1048 crit_exit(); 1049 1050 return(entry); 1051 } 1052 1053 /* 1054 * 1055 */ 1056 static void 1057 vm_map_backing_attach(vm_map_backing_t ba) 1058 { 1059 vm_object_t obj = ba->object; 1060 1061 spin_lock(&obj->spin); 1062 TAILQ_INSERT_TAIL(&obj->backing_list, ba, entry); 1063 spin_unlock(&obj->spin); 1064 } 1065 1066 static void 1067 vm_map_backing_detach(vm_map_backing_t ba) 1068 { 1069 vm_object_t obj = ba->object; 1070 1071 spin_lock(&obj->spin); 1072 TAILQ_REMOVE(&obj->backing_list, ba, entry); 1073 spin_unlock(&obj->spin); 1074 } 1075 1076 /* 1077 * Dispose of the dynamically allocated backing_ba chain associated 1078 * with a vm_map_entry. 1079 * 1080 * We decrement the (possibly shared) element and kfree() on the 1081 * 1->0 transition. We only iterate to the next backing_ba when 1082 * the previous one went through a 1->0 transition. 1083 */ 1084 static void 1085 vm_map_entry_dispose_ba(vm_map_backing_t ba) 1086 { 1087 vm_map_backing_t next; 1088 long refs; 1089 1090 while (ba) { 1091 refs = atomic_fetchadd_long(&ba->refs, -1); 1092 if (refs > 1) 1093 break; 1094 KKASSERT(refs == 1); /* transitioned 1->0 */ 1095 if (ba->object) { 1096 vm_map_backing_detach(ba); 1097 vm_object_deallocate(ba->object); 1098 } 1099 next = ba->backing_ba; 1100 kfree(ba, M_MAP_BACKING); 1101 ba = next; 1102 } 1103 } 1104 1105 /* 1106 * Dispose of a vm_map_entry that is no longer being referenced. 1107 * 1108 * No requirements. 1109 */ 1110 static void 1111 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp) 1112 { 1113 struct globaldata *gd = mycpu; 1114 1115 /* 1116 * Dispose of the base object and the backing link. 1117 */ 1118 switch(entry->maptype) { 1119 case VM_MAPTYPE_NORMAL: 1120 case VM_MAPTYPE_VPAGETABLE: 1121 if (entry->ba.object) { 1122 vm_map_backing_detach(&entry->ba); 1123 vm_object_deallocate(entry->ba.object); 1124 } 1125 break; 1126 case VM_MAPTYPE_SUBMAP: 1127 case VM_MAPTYPE_UKSMAP: 1128 /* XXX TODO */ 1129 break; 1130 default: 1131 break; 1132 } 1133 vm_map_entry_dispose_ba(entry->ba.backing_ba); 1134 1135 /* 1136 * Cleanup for safety. 1137 */ 1138 entry->ba.backing_ba = NULL; 1139 entry->ba.object = NULL; 1140 entry->ba.offset = 0; 1141 1142 ++*countp; 1143 crit_enter(); 1144 MAPENT_FREELIST(entry) = gd->gd_vme_base; 1145 gd->gd_vme_base = entry; 1146 crit_exit(); 1147 } 1148 1149 1150 /* 1151 * Insert/remove entries from maps. 1152 * 1153 * The related map must be exclusively locked. 1154 * The caller must hold map->token 1155 * No other requirements. 1156 */ 1157 static __inline void 1158 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) 1159 { 1160 ASSERT_VM_MAP_LOCKED(map); 1161 1162 map->nentries++; 1163 if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry)) 1164 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry); 1165 } 1166 1167 static __inline void 1168 vm_map_entry_unlink(vm_map_t map, 1169 vm_map_entry_t entry) 1170 { 1171 ASSERT_VM_MAP_LOCKED(map); 1172 1173 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1174 panic("vm_map_entry_unlink: attempt to mess with " 1175 "locked entry! 
%p", entry); 1176 } 1177 vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry); 1178 map->nentries--; 1179 } 1180 1181 /* 1182 * Finds the map entry containing (or immediately preceding) the specified 1183 * address in the given map. The entry is returned in (*entry). 1184 * 1185 * The boolean result indicates whether the address is actually contained 1186 * in the map. 1187 * 1188 * The related map must be locked. 1189 * No other requirements. 1190 */ 1191 boolean_t 1192 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) 1193 { 1194 vm_map_entry_t tmp; 1195 vm_map_entry_t last; 1196 1197 ASSERT_VM_MAP_LOCKED(map); 1198 1199 /* 1200 * Locate the record from the top of the tree. 'last' tracks the 1201 * closest prior record and is returned if no match is found, which 1202 * in binary tree terms means tracking the most recent right-branch 1203 * taken. If there is no prior record, *entry is set to NULL. 1204 */ 1205 last = NULL; 1206 tmp = RB_ROOT(&map->rb_root); 1207 1208 while (tmp) { 1209 if (address >= tmp->start) { 1210 if (address < tmp->end) { 1211 *entry = tmp; 1212 return(TRUE); 1213 } 1214 last = tmp; 1215 tmp = RB_RIGHT(tmp, rb_entry); 1216 } else { 1217 tmp = RB_LEFT(tmp, rb_entry); 1218 } 1219 } 1220 *entry = last; 1221 return (FALSE); 1222 } 1223 1224 /* 1225 * Inserts the given whole VM object into the target map at the specified 1226 * address range. The object's size should match that of the address range. 1227 * 1228 * The map must be exclusively locked. 1229 * The object must be held. 1230 * The caller must have reserved sufficient vm_map_entry structures. 1231 * 1232 * If object is non-NULL, ref count must be bumped by caller prior to 1233 * making call to account for the new entry. XXX API is a bit messy. 1234 */ 1235 int 1236 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux, 1237 vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, 1238 vm_maptype_t maptype, vm_subsys_t id, 1239 vm_prot_t prot, vm_prot_t max, int cow) 1240 { 1241 vm_map_entry_t new_entry; 1242 vm_map_entry_t prev_entry; 1243 vm_map_entry_t next; 1244 vm_map_entry_t temp_entry; 1245 vm_eflags_t protoeflags; 1246 vm_object_t object; 1247 int must_drop = 0; 1248 1249 if (maptype == VM_MAPTYPE_UKSMAP) 1250 object = NULL; 1251 else 1252 object = map_object; 1253 1254 ASSERT_VM_MAP_LOCKED(map); 1255 if (object) 1256 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1257 1258 /* 1259 * Check that the start and end points are not bogus. 1260 */ 1261 if ((start < vm_map_min(map)) || (end > vm_map_max(map)) || 1262 (start >= end)) { 1263 return (KERN_INVALID_ADDRESS); 1264 } 1265 1266 /* 1267 * Find the entry prior to the proposed starting address; if it's part 1268 * of an existing entry, this range is bogus. 1269 */ 1270 if (vm_map_lookup_entry(map, start, &temp_entry)) 1271 return (KERN_NO_SPACE); 1272 prev_entry = temp_entry; 1273 1274 /* 1275 * Assert that the next entry doesn't overlap the end point. 
1276 */ 1277 if (prev_entry) 1278 next = vm_map_rb_tree_RB_NEXT(prev_entry); 1279 else 1280 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 1281 if (next && next->start < end) 1282 return (KERN_NO_SPACE); 1283 1284 protoeflags = 0; 1285 1286 if (cow & MAP_COPY_ON_WRITE) 1287 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 1288 1289 if (cow & MAP_NOFAULT) { 1290 protoeflags |= MAP_ENTRY_NOFAULT; 1291 1292 KASSERT(object == NULL, 1293 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1294 } 1295 if (cow & MAP_DISABLE_SYNCER) 1296 protoeflags |= MAP_ENTRY_NOSYNC; 1297 if (cow & MAP_DISABLE_COREDUMP) 1298 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1299 if (cow & MAP_IS_STACK) 1300 protoeflags |= MAP_ENTRY_STACK; 1301 if (cow & MAP_IS_KSTACK) 1302 protoeflags |= MAP_ENTRY_KSTACK; 1303 1304 lwkt_gettoken(&map->token); 1305 1306 if (object) { 1307 ; 1308 } else if (prev_entry && 1309 (prev_entry->eflags == protoeflags) && 1310 (prev_entry->end == start) && 1311 (prev_entry->wired_count == 0) && 1312 (prev_entry->id == id) && 1313 prev_entry->maptype == maptype && 1314 maptype == VM_MAPTYPE_NORMAL && 1315 prev_entry->ba.backing_ba == NULL && /* not backed */ 1316 ((prev_entry->ba.object == NULL) || 1317 vm_object_coalesce(prev_entry->ba.object, 1318 OFF_TO_IDX(prev_entry->ba.offset), 1319 (vm_size_t)(prev_entry->end - prev_entry->start), 1320 (vm_size_t)(end - prev_entry->end)))) { 1321 /* 1322 * We were able to extend the object. Determine if we 1323 * can extend the previous map entry to include the 1324 * new range as well. 1325 */ 1326 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 1327 (prev_entry->protection == prot) && 1328 (prev_entry->max_protection == max)) { 1329 map->size += (end - prev_entry->end); 1330 prev_entry->end = end; 1331 vm_map_simplify_entry(map, prev_entry, countp); 1332 lwkt_reltoken(&map->token); 1333 return (KERN_SUCCESS); 1334 } 1335 1336 /* 1337 * If we can extend the object but cannot extend the 1338 * map entry, we have to create a new map entry. We 1339 * must bump the ref count on the extended object to 1340 * account for it. object may be NULL. 1341 */ 1342 object = prev_entry->ba.object; 1343 offset = prev_entry->ba.offset + 1344 (prev_entry->end - prev_entry->start); 1345 if (object) { 1346 vm_object_hold(object); 1347 vm_object_lock_swap(); /* map->token order */ 1348 vm_object_reference_locked(object); 1349 map_object = object; 1350 must_drop = 1; 1351 } 1352 } 1353 1354 /* 1355 * NOTE: if conditionals fail, object can be NULL here. This occurs 1356 * in things like the buffer map where we manage kva but do not manage 1357 * backing objects. 
         */

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(countp);
        new_entry->map = map;
        new_entry->start = start;
        new_entry->end = end;
        new_entry->id = id;

        new_entry->maptype = maptype;
        new_entry->eflags = protoeflags;
        new_entry->aux.master_pde = 0;          /* in case size is different */
        new_entry->aux.map_aux = map_aux;
        new_entry->ba.map_object = map_object;
        new_entry->ba.backing_ba = NULL;
        new_entry->ba.backing_count = 0;
        new_entry->ba.offset = offset;
        new_entry->ba.refs = 0;
        new_entry->ba.flags = 0;
        new_entry->ba.base_entry = new_entry;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        /*
         * Insert the new entry into the list
         */
        vm_map_backing_replicated(map, new_entry, MAP_BACK_BASEOBJREFD);
        vm_map_entry_link(map, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * Don't worry about updating freehint[] when inserting, allow
         * addresses to be lower than the actual first free spot.
         */
#if 0
        /*
         * Temporarily removed to avoid MAP_STACK panic, due to
         * MAP_STACK being a huge hack.  Will be added back in
         * when MAP_STACK (and the user stack mapping) is fixed.
         */
        /*
         * It may be possible to simplify the entry
         */
        vm_map_simplify_entry(map, new_entry, countp);
#endif

        /*
         * Try to pre-populate the page table.  Mappings governed by virtual
         * page tables cannot be prepopulated without a lot of work, so
         * don't try.
         */
        if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
            maptype != VM_MAPTYPE_VPAGETABLE &&
            maptype != VM_MAPTYPE_UKSMAP) {
                int dorelock = 0;
                if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
                        dorelock = 1;
                        vm_object_lock_swap();
                        vm_object_drop(object);
                }
                pmap_object_init_pt(map->pmap, start, prot,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
                if (dorelock) {
                        vm_object_hold(object);
                        vm_object_lock_swap();
                }
        }
        lwkt_reltoken(&map->token);
        if (must_drop)
                vm_object_drop(object);

        return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 *
 * The map must be exclusively locked.
 * No other requirements.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
                 vm_size_t align, int flags, vm_offset_t *addr)
{
        vm_map_entry_t entry;
        vm_map_entry_t tmp;
        vm_offset_t hole_start;
        vm_offset_t end;
        vm_offset_t align_mask;

        if (start < vm_map_min(map))
                start = vm_map_min(map);
        if (start > vm_map_max(map))
                return (1);

        /*
         * If the alignment is not a power of 2 we will have to use
         * a mod/division, set align_mask to a special value.
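         *
         * Worked example (added): for align = 8, (8 | 7) + 1 == 16 == 8 << 1,
         * so the test below treats it as a power of 2 and uses the mask path;
         * for align = 12, (12 | 11) + 1 == 16 != 24, so the slower
         * mod/division path (roundup) is used instead.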
         */
        if ((align | (align - 1)) + 1 != (align << 1))
                align_mask = (vm_offset_t)-1;
        else
                align_mask = align - 1;

        /*
         * Use freehint to adjust the start point, hopefully reducing
         * the iteration to O(1).
         */
        hole_start = vm_map_freehint_find(map, length, align);
        if (start < hole_start)
                start = hole_start;
        if (vm_map_lookup_entry(map, start, &tmp))
                start = tmp->end;
        entry = tmp;    /* may be NULL */

        /*
         * Look through the rest of the map, trying to fit a new region in the
         * gap between existing regions, or after the very last region.
         */
        for (;;) {
                /*
                 * Adjust the proposed start by the requested alignment,
                 * be sure that we didn't wrap the address.
                 */
                if (align_mask == (vm_offset_t)-1)
                        end = roundup(start, align);
                else
                        end = (start + align_mask) & ~align_mask;
                if (end < start)
                        return (1);
                start = end;

                /*
                 * Find the end of the proposed new region.  Be sure we didn't
                 * go beyond the end of the map, or wrap around the address.
                 * Then check to see if this is the last entry or if the
                 * proposed end fits in the gap between this and the next
                 * entry.
                 */
                end = start + length;
                if (end > vm_map_max(map) || end < start)
                        return (1);

                /*
                 * Locate the next entry, we can stop if this is the
                 * last entry (we know we are in-bounds so that would
                 * be a success).
                 */
                if (entry)
                        entry = vm_map_rb_tree_RB_NEXT(entry);
                else
                        entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
                if (entry == NULL)
                        break;

                /*
                 * Determine if the proposed area would overlap the
                 * next entry.
                 *
                 * When matching against a STACK entry, only allow the
                 * memory map to intrude on the ungrown portion of the
                 * STACK entry when MAP_TRYFIXED is set.
                 */
                if (entry->start >= end) {
                        if ((entry->eflags & MAP_ENTRY_STACK) == 0)
                                break;
                        if (flags & MAP_TRYFIXED)
                                break;
                        if (entry->start - entry->aux.avail_ssize >= end)
                                break;
                }
                start = entry->end;
        }

        /*
         * Update the freehint
         */
        vm_map_freehint_update(map, start, length, align);

        /*
         * Grow the kernel_map if necessary.  pmap_growkernel() will panic
         * if it fails.  The kernel_map is locked and nothing can steal
         * our address space if pmap_growkernel() blocks.
         *
         * NOTE: This may be unconditionally called for kldload areas on
         *	 x86_64 because these do not bump kernel_vm_end (which would
         *	 fill 128G worth of page tables!).  Therefore we must not
         *	 retry.
         */
        if (map == &kernel_map) {
                vm_offset_t kstop;

                kstop = round_page(start + length);
                if (kstop > kernel_vm_end)
                        pmap_growkernel(start, kstop);
        }
        *addr = start;
        return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address map with
 * the given length and allocates it.  The search is defined to be first-fit
 * from the specified address; the region found is returned in the same
 * parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 *
 * No requirements.  This function will lock the map temporarily.
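 *
 * Illustrative call sketch (added; VM_SUBSYS_EXAMPLE is a placeholder id,
 * not a real constant, and the caller has already bumped object's ref
 * count as required above):
 *
 *	addr = suggested_base;
 *	rv = vm_map_find(map, object, NULL, 0, &addr, size, PAGE_SIZE,
 *			 TRUE, VM_MAPTYPE_NORMAL, VM_SUBSYS_EXAMPLE,
 *			 VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		... handle failure ...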
 */
int
vm_map_find(vm_map_t map, void *map_object, void *map_aux,
            vm_ooffset_t offset, vm_offset_t *addr,
            vm_size_t length, vm_size_t align, boolean_t fitit,
            vm_maptype_t maptype, vm_subsys_t id,
            vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_offset_t start;
        vm_object_t object;
        int result;
        int count;

        if (maptype == VM_MAPTYPE_UKSMAP)
                object = NULL;
        else
                object = map_object;

        start = *addr;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (object)
                vm_object_hold_shared(object);
        if (fitit) {
                if (vm_map_findspace(map, start, length, align, 0, addr)) {
                        if (object)
                                vm_object_drop(object);
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_NO_SPACE);
                }
                start = *addr;
        }
        result = vm_map_insert(map, &count, map_object, map_aux,
                               offset, start, start + length,
                               maptype, id, prot, max, cow);
        if (object)
                vm_object_drop(object);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        return (result);
}

/*
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.  No action is taken on entries which have their
 * in-transition flag set.
 *
 * The map must be exclusively locked.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                ++mycpu->gd_cnt.v_intrans_coll;
                return;
        }

        if (entry->maptype == VM_MAPTYPE_SUBMAP)
                return;
        if (entry->maptype == VM_MAPTYPE_UKSMAP)
                return;

        prev = vm_map_rb_tree_RB_PREV(entry);
        if (prev) {
                prevsize = prev->end - prev->start;
                if ( (prev->end == entry->start) &&
                     (prev->maptype == entry->maptype) &&
                     (prev->ba.object == entry->ba.object) &&
                     (prev->ba.backing_ba == entry->ba.backing_ba) &&
                     (!prev->ba.object ||
                        (prev->ba.offset + prevsize == entry->ba.offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->id == entry->id) &&
                     (prev->wired_count == entry->wired_count)) {
                        vm_map_entry_unlink(map, prev);
                        entry->start = prev->start;
                        entry->ba.offset = prev->ba.offset;
                        vm_map_entry_dispose(map, prev, countp);
                }
        }

        next = vm_map_rb_tree_RB_NEXT(entry);
        if (next) {
                esize = entry->end - entry->start;
                if ((entry->end == next->start) &&
                    (next->maptype == entry->maptype) &&
                    (next->ba.object == entry->ba.object) &&
                    (next->ba.backing_ba == entry->ba.backing_ba) &&
                     (!entry->ba.object ||
                        (entry->ba.offset + esize == next->ba.offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->id == entry->id) &&
                    (next->wired_count == entry->wired_count)) {
                        vm_map_entry_unlink(map, next);
                        entry->end = next->end;
vm_map_entry_dispose(map, next, countp); 1695 } 1696 } 1697 } 1698 1699 /* 1700 * Asserts that the given entry begins at or after the specified address. 1701 * If necessary, it splits the entry into two. 1702 */ 1703 #define vm_map_clip_start(map, entry, startaddr, countp) \ 1704 { \ 1705 if (startaddr > entry->start) \ 1706 _vm_map_clip_start(map, entry, startaddr, countp); \ 1707 } 1708 1709 /* 1710 * This routine is called only when it is known that the entry must be split. 1711 * 1712 * The map must be exclusively locked. 1713 */ 1714 static void 1715 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, 1716 int *countp) 1717 { 1718 vm_map_entry_t new_entry; 1719 1720 /* 1721 * Split off the front portion -- note that we must insert the new 1722 * entry BEFORE this one, so that this entry has the specified 1723 * starting address. 1724 */ 1725 1726 vm_map_simplify_entry(map, entry, countp); 1727 1728 /* 1729 * If there is no object backing this entry, we might as well create 1730 * one now. If we defer it, an object can get created after the map 1731 * is clipped, and individual objects will be created for the split-up 1732 * map. This is a bit of a hack, but is also about the best place to 1733 * put this improvement. 1734 */ 1735 if (entry->ba.object == NULL && !map->system_map && 1736 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1737 vm_map_entry_allocate_object(entry); 1738 } 1739 1740 new_entry = vm_map_entry_create(countp); 1741 *new_entry = *entry; 1742 1743 new_entry->end = start; 1744 entry->ba.offset += (start - entry->start); 1745 entry->start = start; 1746 1747 vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED); 1748 vm_map_entry_link(map, new_entry); 1749 } 1750 1751 /* 1752 * Asserts that the given entry ends at or before the specified address. 1753 * If necessary, it splits the entry into two. 1754 * 1755 * The map must be exclusively locked. 1756 */ 1757 #define vm_map_clip_end(map, entry, endaddr, countp) \ 1758 { \ 1759 if (endaddr < entry->end) \ 1760 _vm_map_clip_end(map, entry, endaddr, countp); \ 1761 } 1762 1763 /* 1764 * This routine is called only when it is known that the entry must be split. 1765 * 1766 * The map must be exclusively locked. 1767 */ 1768 static void 1769 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, 1770 int *countp) 1771 { 1772 vm_map_entry_t new_entry; 1773 1774 /* 1775 * If there is no object backing this entry, we might as well create 1776 * one now. If we defer it, an object can get created after the map 1777 * is clipped, and individual objects will be created for the split-up 1778 * map. This is a bit of a hack, but is also about the best place to 1779 * put this improvement. 1780 */ 1781 1782 if (entry->ba.object == NULL && !map->system_map && 1783 VM_MAP_ENTRY_WITHIN_PARTITION(entry)) { 1784 vm_map_entry_allocate_object(entry); 1785 } 1786 1787 /* 1788 * Create a new entry and insert it AFTER the specified entry 1789 */ 1790 new_entry = vm_map_entry_create(countp); 1791 *new_entry = *entry; 1792 1793 new_entry->start = entry->end = end; 1794 new_entry->ba.offset += (end - entry->start); 1795 1796 vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED); 1797 vm_map_entry_link(map, new_entry); 1798 } 1799 1800 /* 1801 * Asserts that the starting and ending region addresses fall within the 1802 * valid range for the map. 
 */
#define VM_MAP_RANGE_CHECK(map, start, end)	\
{						\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
}

/*
 * Used to block when an in-transition collision occurs.  The map
 * is unlocked for the sleep and relocked before the return.
 */
void
vm_map_transition_wait(vm_map_t map, int relock)
{
        tsleep_interlock(map, 0);
        vm_map_unlock(map);
        tsleep(map, PINTERLOCKED, "vment", 0);
        if (relock)
                vm_map_lock(map);
}

/*
 * When we do blocking operations with the map lock held it is
 * possible that a clip might have occurred on our in-transit entry,
 * requiring an adjustment to the entry in our loop.  These macros
 * help the pageable and clip_range code deal with the case.  The
 * conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)			\
	do {							\
		while (entry->start != save_start) {		\
			entry = vm_map_rb_tree_RB_PREV(entry);	\
			KASSERT(entry, ("bad entry clip"));	\
		}						\
	} while(0)

#define CLIP_CHECK_FWD(entry, save_end)				\
	do {							\
		while (entry->end != save_end) {		\
			entry = vm_map_rb_tree_RB_NEXT(entry);	\
			KASSERT(entry, ("bad entry clip"));	\
		}						\
	} while(0)


/*
 * Clip the specified range and return the base entry.  The
 * range may cover several entries starting at the returned base
 * and the first and last entry in the covering sequence will be
 * properly clipped to the requested start and end address.
 *
 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 * flag.
 *
 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 * covered by the requested range.
 *
 * The map must be exclusively locked on entry and will remain locked
 * on return.  If no range exists or the range contains holes and you
 * specified that no holes were allowed, NULL will be returned.  This
 * routine may temporarily unlock the map in order to avoid a deadlock
 * when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
                  int *countp, int flags)
{
        vm_map_entry_t start_entry;
        vm_map_entry_t entry;
        vm_map_entry_t next;

        /*
         * Locate the entry and effect initial clipping.  The in-transition
         * case does not occur very often so do not try to optimize it.
         */
again:
        if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
                return (NULL);
        entry = start_entry;
        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                ++mycpu->gd_cnt.v_intrans_coll;
                ++mycpu->gd_cnt.v_intrans_wait;
                vm_map_transition_wait(map, 1);
                /*
                 * entry and/or start_entry may have been clipped while
                 * we slept, or may have gone away entirely.  We have
                 * to restart from the lookup.
                 */
                goto again;
        }

        /*
         * Since we hold an exclusive map lock we do not have to restart
         * after clipping, even though clipping may block in zalloc.
         */
        vm_map_clip_start(map, entry, start, countp);
        vm_map_clip_end(map, entry, end, countp);
        entry->eflags |= MAP_ENTRY_IN_TRANSITION;

        /*
         * Scan entries covered by the range.  When working on the next
When working on the next 1911 * entry a restart need only re-loop on the current entry which 1912 * we have already locked, since 'next' may have changed. Also, 1913 * even though entry is safe, it may have been clipped so we 1914 * have to iterate forwards through the clip after sleeping. 1915 */ 1916 for (;;) { 1917 next = vm_map_rb_tree_RB_NEXT(entry); 1918 if (next == NULL || next->start >= end) 1919 break; 1920 if (flags & MAP_CLIP_NO_HOLES) { 1921 if (next->start > entry->end) { 1922 vm_map_unclip_range(map, start_entry, 1923 start, entry->end, countp, flags); 1924 return(NULL); 1925 } 1926 } 1927 1928 if (next->eflags & MAP_ENTRY_IN_TRANSITION) { 1929 vm_offset_t save_end = entry->end; 1930 next->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1931 ++mycpu->gd_cnt.v_intrans_coll; 1932 ++mycpu->gd_cnt.v_intrans_wait; 1933 vm_map_transition_wait(map, 1); 1934 1935 /* 1936 * clips might have occured while we blocked. 1937 */ 1938 CLIP_CHECK_FWD(entry, save_end); 1939 CLIP_CHECK_BACK(start_entry, start); 1940 continue; 1941 } 1942 1943 /* 1944 * No restart necessary even though clip_end may block, we 1945 * are holding the map lock. 1946 */ 1947 vm_map_clip_end(map, next, end, countp); 1948 next->eflags |= MAP_ENTRY_IN_TRANSITION; 1949 entry = next; 1950 } 1951 if (flags & MAP_CLIP_NO_HOLES) { 1952 if (entry->end != end) { 1953 vm_map_unclip_range(map, start_entry, 1954 start, entry->end, countp, flags); 1955 return(NULL); 1956 } 1957 } 1958 return(start_entry); 1959 } 1960 1961 /* 1962 * Undo the effect of vm_map_clip_range(). You should pass the same 1963 * flags and the same range that you passed to vm_map_clip_range(). 1964 * This code will clear the in-transition flag on the entries and 1965 * wake up anyone waiting. This code will also simplify the sequence 1966 * and attempt to merge it with entries before and after the sequence. 1967 * 1968 * The map must be locked on entry and will remain locked on return. 1969 * 1970 * Note that you should also pass the start_entry returned by 1971 * vm_map_clip_range(). However, if you block between the two calls 1972 * with the map unlocked please be aware that the start_entry may 1973 * have been clipped and you may need to scan it backwards to find 1974 * the entry corresponding with the original start address. You are 1975 * responsible for this, vm_map_unclip_range() expects the correct 1976 * start_entry to be passed to it and will KASSERT otherwise. 1977 */ 1978 static 1979 void 1980 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry, 1981 vm_offset_t start, vm_offset_t end, 1982 int *countp, int flags) 1983 { 1984 vm_map_entry_t entry; 1985 1986 entry = start_entry; 1987 1988 KASSERT(entry->start == start, ("unclip_range: illegal base entry")); 1989 while (entry && entry->start < end) { 1990 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1991 ("in-transition flag not set during unclip on: %p", 1992 entry)); 1993 KASSERT(entry->end <= end, 1994 ("unclip_range: tail wasn't clipped")); 1995 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1996 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1997 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1998 wakeup(map); 1999 } 2000 entry = vm_map_rb_tree_RB_NEXT(entry); 2001 } 2002 2003 /* 2004 * Simplification does not block so there is no restart case. 2005 */ 2006 entry = start_entry; 2007 while (entry && entry->start < end) { 2008 vm_map_simplify_entry(map, entry, countp); 2009 entry = vm_map_rb_tree_RB_NEXT(entry); 2010 } 2011 } 2012 2013 /* 2014 * Mark the given range as handled by a subordinate map. 
2015 * 2016 * This range must have been created with vm_map_find(), and no other 2017 * operations may have been performed on this range prior to calling 2018 * vm_map_submap(). 2019 * 2020 * Submappings cannot be removed. 2021 * 2022 * No requirements. 2023 */ 2024 int 2025 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) 2026 { 2027 vm_map_entry_t entry; 2028 int result = KERN_INVALID_ARGUMENT; 2029 int count; 2030 2031 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2032 vm_map_lock(map); 2033 2034 VM_MAP_RANGE_CHECK(map, start, end); 2035 2036 if (vm_map_lookup_entry(map, start, &entry)) { 2037 vm_map_clip_start(map, entry, start, &count); 2038 } else if (entry) { 2039 entry = vm_map_rb_tree_RB_NEXT(entry); 2040 } else { 2041 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2042 } 2043 2044 vm_map_clip_end(map, entry, end, &count); 2045 2046 if ((entry->start == start) && (entry->end == end) && 2047 ((entry->eflags & MAP_ENTRY_COW) == 0) && 2048 (entry->ba.object == NULL)) { 2049 entry->ba.sub_map = submap; 2050 entry->maptype = VM_MAPTYPE_SUBMAP; 2051 result = KERN_SUCCESS; 2052 } 2053 vm_map_unlock(map); 2054 vm_map_entry_release(count); 2055 2056 return (result); 2057 } 2058 2059 /* 2060 * Sets the protection of the specified address region in the target map. 2061 * If "set_max" is specified, the maximum protection is to be set; 2062 * otherwise, only the current protection is affected. 2063 * 2064 * The protection is not applicable to submaps, but is applicable to normal 2065 * maps and maps governed by virtual page tables. For example, when operating 2066 * on a virtual page table our protection basically controls how COW occurs 2067 * on the backing object, whereas the virtual page table abstraction itself 2068 * is an abstraction for userland. 2069 * 2070 * No requirements. 2071 */ 2072 int 2073 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2074 vm_prot_t new_prot, boolean_t set_max) 2075 { 2076 vm_map_entry_t current; 2077 vm_map_entry_t entry; 2078 int count; 2079 2080 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2081 vm_map_lock(map); 2082 2083 VM_MAP_RANGE_CHECK(map, start, end); 2084 2085 if (vm_map_lookup_entry(map, start, &entry)) { 2086 vm_map_clip_start(map, entry, start, &count); 2087 } else if (entry) { 2088 entry = vm_map_rb_tree_RB_NEXT(entry); 2089 } else { 2090 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2091 } 2092 2093 /* 2094 * Make a first pass to check for protection violations. 2095 */ 2096 current = entry; 2097 while (current && current->start < end) { 2098 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2099 vm_map_unlock(map); 2100 vm_map_entry_release(count); 2101 return (KERN_INVALID_ARGUMENT); 2102 } 2103 if ((new_prot & current->max_protection) != new_prot) { 2104 vm_map_unlock(map); 2105 vm_map_entry_release(count); 2106 return (KERN_PROTECTION_FAILURE); 2107 } 2108 2109 /* 2110 * When making a SHARED+RW file mmap writable, update 2111 * v_lastwrite_ts. 
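 * (The recorded timestamp presumably lets the vnode code fold writes
 * made through the mapping into the file's times later on.)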
2112 */ 2113 if (new_prot & PROT_WRITE && 2114 (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 2115 (current->maptype == VM_MAPTYPE_NORMAL || 2116 current->maptype == VM_MAPTYPE_VPAGETABLE) && 2117 current->ba.object && 2118 current->ba.object->type == OBJT_VNODE) { 2119 struct vnode *vp; 2120 2121 vp = current->ba.object->handle; 2122 if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT) == 0) { 2123 vfs_timestamp(&vp->v_lastwrite_ts); 2124 vsetflags(vp, VLASTWRITETS); 2125 vn_unlock(vp); 2126 } 2127 } 2128 current = vm_map_rb_tree_RB_NEXT(current); 2129 } 2130 2131 /* 2132 * Go back and fix up protections. [Note that clipping is not 2133 * necessary the second time.] 2134 */ 2135 current = entry; 2136 2137 while (current && current->start < end) { 2138 vm_prot_t old_prot; 2139 2140 vm_map_clip_end(map, current, end, &count); 2141 2142 old_prot = current->protection; 2143 if (set_max) { 2144 current->max_protection = new_prot; 2145 current->protection = new_prot & old_prot; 2146 } else { 2147 current->protection = new_prot; 2148 } 2149 2150 /* 2151 * Update physical map if necessary. Worry about copy-on-write 2152 * here -- CHECK THIS XXX 2153 */ 2154 if (current->protection != old_prot) { 2155 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2156 VM_PROT_ALL) 2157 2158 pmap_protect(map->pmap, current->start, 2159 current->end, 2160 current->protection & MASK(current)); 2161 #undef MASK 2162 } 2163 2164 vm_map_simplify_entry(map, current, &count); 2165 2166 current = vm_map_rb_tree_RB_NEXT(current); 2167 } 2168 vm_map_unlock(map); 2169 vm_map_entry_release(count); 2170 return (KERN_SUCCESS); 2171 } 2172 2173 /* 2174 * This routine traverses a processes map handling the madvise 2175 * system call. Advisories are classified as either those effecting 2176 * the vm_map_entry structure, or those effecting the underlying 2177 * objects. 2178 * 2179 * The <value> argument is used for extended madvise calls. 2180 * 2181 * No requirements. 2182 */ 2183 int 2184 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, 2185 int behav, off_t value) 2186 { 2187 vm_map_entry_t current, entry; 2188 int modify_map = 0; 2189 int error = 0; 2190 int count; 2191 2192 /* 2193 * Some madvise calls directly modify the vm_map_entry, in which case 2194 * we need to use an exclusive lock on the map and we need to perform 2195 * various clipping operations. Otherwise we only need a read-lock 2196 * on the map. 2197 */ 2198 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2199 2200 switch(behav) { 2201 case MADV_NORMAL: 2202 case MADV_SEQUENTIAL: 2203 case MADV_RANDOM: 2204 case MADV_NOSYNC: 2205 case MADV_AUTOSYNC: 2206 case MADV_NOCORE: 2207 case MADV_CORE: 2208 case MADV_SETMAP: 2209 modify_map = 1; 2210 vm_map_lock(map); 2211 break; 2212 case MADV_INVAL: 2213 case MADV_WILLNEED: 2214 case MADV_DONTNEED: 2215 case MADV_FREE: 2216 vm_map_lock_read(map); 2217 break; 2218 default: 2219 vm_map_entry_release(count); 2220 return (EINVAL); 2221 } 2222 2223 /* 2224 * Locate starting entry and clip if necessary. 2225 */ 2226 2227 VM_MAP_RANGE_CHECK(map, start, end); 2228 2229 if (vm_map_lookup_entry(map, start, &entry)) { 2230 if (modify_map) 2231 vm_map_clip_start(map, entry, start, &count); 2232 } else if (entry) { 2233 entry = vm_map_rb_tree_RB_NEXT(entry); 2234 } else { 2235 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2236 } 2237 2238 if (modify_map) { 2239 /* 2240 * madvise behaviors that are implemented in the vm_map_entry. 
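 * (These are the modify_map cases selected above: MADV_NORMAL,
 * MADV_SEQUENTIAL, MADV_RANDOM, MADV_NOSYNC, MADV_AUTOSYNC,
 * MADV_NOCORE, MADV_CORE, and MADV_SETMAP.)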
2241 * 2242 * We clip the vm_map_entry so that behavioral changes are 2243 * limited to the specified address range. 2244 */ 2245 for (current = entry; 2246 current && current->start < end; 2247 current = vm_map_rb_tree_RB_NEXT(current)) { 2248 /* 2249 * Ignore submaps 2250 */ 2251 if (current->maptype == VM_MAPTYPE_SUBMAP) 2252 continue; 2253 2254 vm_map_clip_end(map, current, end, &count); 2255 2256 switch (behav) { 2257 case MADV_NORMAL: 2258 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2259 break; 2260 case MADV_SEQUENTIAL: 2261 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2262 break; 2263 case MADV_RANDOM: 2264 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2265 break; 2266 case MADV_NOSYNC: 2267 current->eflags |= MAP_ENTRY_NOSYNC; 2268 break; 2269 case MADV_AUTOSYNC: 2270 current->eflags &= ~MAP_ENTRY_NOSYNC; 2271 break; 2272 case MADV_NOCORE: 2273 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2274 break; 2275 case MADV_CORE: 2276 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2277 break; 2278 case MADV_SETMAP: 2279 /* 2280 * Set the page directory page for a map 2281 * governed by a virtual page table. Mark 2282 * the entry as being governed by a virtual 2283 * page table if it is not. 2284 * 2285 * XXX the page directory page is stored 2286 * in the avail_ssize field if the map_entry. 2287 * 2288 * XXX the map simplification code does not 2289 * compare this field so weird things may 2290 * happen if you do not apply this function 2291 * to the entire mapping governed by the 2292 * virtual page table. 2293 */ 2294 if (current->maptype != VM_MAPTYPE_VPAGETABLE) { 2295 error = EINVAL; 2296 break; 2297 } 2298 current->aux.master_pde = value; 2299 pmap_remove(map->pmap, 2300 current->start, current->end); 2301 break; 2302 case MADV_INVAL: 2303 /* 2304 * Invalidate the related pmap entries, used 2305 * to flush portions of the real kernel's 2306 * pmap when the caller has removed or 2307 * modified existing mappings in a virtual 2308 * page table. 2309 * 2310 * (exclusive locked map version does not 2311 * need the range interlock). 2312 */ 2313 pmap_remove(map->pmap, 2314 current->start, current->end); 2315 break; 2316 default: 2317 error = EINVAL; 2318 break; 2319 } 2320 vm_map_simplify_entry(map, current, &count); 2321 } 2322 vm_map_unlock(map); 2323 } else { 2324 vm_pindex_t pindex; 2325 vm_pindex_t delta; 2326 2327 /* 2328 * madvise behaviors that are implemented in the underlying 2329 * vm_object. 2330 * 2331 * Since we don't clip the vm_map_entry, we have to clip 2332 * the vm_object pindex and count. 2333 * 2334 * NOTE! These functions are only supported on normal maps, 2335 * except MADV_INVAL which is also supported on 2336 * virtual page tables. 2337 * 2338 * NOTE! These functions only apply to the top-most object. 2339 * It is not applicable to backing objects. 
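 *
 * Worked example of the clipping arithmetic below (illustrative values,
 * assuming 4KiB pages): for an entry covering [0x10000, 0x14000) with a
 * madvise range starting at 0x12000, pindex is advanced by
 * atop(0x2000) == 2 pages and delta is reduced by the same amount, so
 * only the overlapping pages are affected.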
2340 */ 2341 for (current = entry; 2342 current && current->start < end; 2343 current = vm_map_rb_tree_RB_NEXT(current)) { 2344 vm_offset_t useStart; 2345 2346 if (current->maptype != VM_MAPTYPE_NORMAL && 2347 (current->maptype != VM_MAPTYPE_VPAGETABLE || 2348 behav != MADV_INVAL)) { 2349 continue; 2350 } 2351 2352 pindex = OFF_TO_IDX(current->ba.offset); 2353 delta = atop(current->end - current->start); 2354 useStart = current->start; 2355 2356 if (current->start < start) { 2357 pindex += atop(start - current->start); 2358 delta -= atop(start - current->start); 2359 useStart = start; 2360 } 2361 if (current->end > end) 2362 delta -= atop(current->end - end); 2363 2364 if ((vm_spindex_t)delta <= 0) 2365 continue; 2366 2367 if (behav == MADV_INVAL) { 2368 /* 2369 * Invalidate the related pmap entries, used 2370 * to flush portions of the real kernel's 2371 * pmap when the caller has removed or 2372 * modified existing mappings in a virtual 2373 * page table. 2374 * 2375 * (shared locked map version needs the 2376 * interlock, see vm_fault()). 2377 */ 2378 struct vm_map_ilock ilock; 2379 2380 KASSERT(useStart >= VM_MIN_USER_ADDRESS && 2381 useStart + ptoa(delta) <= 2382 VM_MAX_USER_ADDRESS, 2383 ("Bad range %016jx-%016jx (%016jx)", 2384 useStart, useStart + ptoa(delta), 2385 delta)); 2386 vm_map_interlock(map, &ilock, 2387 useStart, 2388 useStart + ptoa(delta)); 2389 pmap_remove(map->pmap, 2390 useStart, 2391 useStart + ptoa(delta)); 2392 vm_map_deinterlock(map, &ilock); 2393 } else { 2394 vm_object_madvise(current->ba.object, 2395 pindex, delta, behav); 2396 } 2397 2398 /* 2399 * Try to populate the page table. Mappings governed 2400 * by virtual page tables cannot be pre-populated 2401 * without a lot of work so don't try. 2402 */ 2403 if (behav == MADV_WILLNEED && 2404 current->maptype != VM_MAPTYPE_VPAGETABLE) { 2405 pmap_object_init_pt( 2406 map->pmap, 2407 useStart, 2408 current->protection, 2409 current->ba.object, 2410 pindex, 2411 (count << PAGE_SHIFT), 2412 MAP_PREFAULT_MADVISE 2413 ); 2414 } 2415 } 2416 vm_map_unlock_read(map); 2417 } 2418 vm_map_entry_release(count); 2419 return(error); 2420 } 2421 2422 2423 /* 2424 * Sets the inheritance of the specified address range in the target map. 2425 * Inheritance affects how the map will be shared with child maps at the 2426 * time of vm_map_fork. 
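 *
 * For example (illustrative summary): VM_INHERIT_SHARE causes the range
 * to be shared between parent and child after the fork, VM_INHERIT_COPY
 * gives the child a copy-on-write copy, and VM_INHERIT_NONE leaves the
 * range unmapped in the child (see vmspace_fork_normal_entry() below).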
2427 */ 2428 int 2429 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2430 vm_inherit_t new_inheritance) 2431 { 2432 vm_map_entry_t entry; 2433 vm_map_entry_t temp_entry; 2434 int count; 2435 2436 switch (new_inheritance) { 2437 case VM_INHERIT_NONE: 2438 case VM_INHERIT_COPY: 2439 case VM_INHERIT_SHARE: 2440 break; 2441 default: 2442 return (KERN_INVALID_ARGUMENT); 2443 } 2444 2445 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2446 vm_map_lock(map); 2447 2448 VM_MAP_RANGE_CHECK(map, start, end); 2449 2450 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2451 entry = temp_entry; 2452 vm_map_clip_start(map, entry, start, &count); 2453 } else if (temp_entry) { 2454 entry = vm_map_rb_tree_RB_NEXT(temp_entry); 2455 } else { 2456 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 2457 } 2458 2459 while (entry && entry->start < end) { 2460 vm_map_clip_end(map, entry, end, &count); 2461 2462 entry->inheritance = new_inheritance; 2463 2464 vm_map_simplify_entry(map, entry, &count); 2465 2466 entry = vm_map_rb_tree_RB_NEXT(entry); 2467 } 2468 vm_map_unlock(map); 2469 vm_map_entry_release(count); 2470 return (KERN_SUCCESS); 2471 } 2472 2473 /* 2474 * Implement the semantics of mlock 2475 */ 2476 int 2477 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, 2478 boolean_t new_pageable) 2479 { 2480 vm_map_entry_t entry; 2481 vm_map_entry_t start_entry; 2482 vm_offset_t end; 2483 int rv = KERN_SUCCESS; 2484 int count; 2485 2486 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2487 vm_map_lock(map); 2488 VM_MAP_RANGE_CHECK(map, start, real_end); 2489 end = real_end; 2490 2491 start_entry = vm_map_clip_range(map, start, end, &count, 2492 MAP_CLIP_NO_HOLES); 2493 if (start_entry == NULL) { 2494 vm_map_unlock(map); 2495 vm_map_entry_release(count); 2496 return (KERN_INVALID_ADDRESS); 2497 } 2498 2499 if (new_pageable == 0) { 2500 entry = start_entry; 2501 while (entry && entry->start < end) { 2502 vm_offset_t save_start; 2503 vm_offset_t save_end; 2504 2505 /* 2506 * Already user wired or hard wired (trivial cases) 2507 */ 2508 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 2509 entry = vm_map_rb_tree_RB_NEXT(entry); 2510 continue; 2511 } 2512 if (entry->wired_count != 0) { 2513 entry->wired_count++; 2514 entry->eflags |= MAP_ENTRY_USER_WIRED; 2515 entry = vm_map_rb_tree_RB_NEXT(entry); 2516 continue; 2517 } 2518 2519 /* 2520 * A new wiring requires instantiation of appropriate 2521 * management structures and the faulting in of the 2522 * page. 2523 */ 2524 if (entry->maptype == VM_MAPTYPE_NORMAL || 2525 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2526 int copyflag = entry->eflags & 2527 MAP_ENTRY_NEEDS_COPY; 2528 if (copyflag && ((entry->protection & 2529 VM_PROT_WRITE) != 0)) { 2530 vm_map_entry_shadow(entry); 2531 } else if (entry->ba.object == NULL && 2532 !map->system_map) { 2533 vm_map_entry_allocate_object(entry); 2534 } 2535 } 2536 entry->wired_count++; 2537 entry->eflags |= MAP_ENTRY_USER_WIRED; 2538 2539 /* 2540 * Now fault in the area. Note that vm_fault_wire() 2541 * may release the map lock temporarily, it will be 2542 * relocked on return. The in-transition 2543 * flag protects the entries. 
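 * Because the lock can be lost, the entry may be clipped while
 * we sleep; save_start/save_end and the CLIP_CHECK_*() macros
 * below are used to re-locate our position in the clipped
 * sequence afterwards.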
2544 */ 2545 save_start = entry->start; 2546 save_end = entry->end; 2547 rv = vm_fault_wire(map, entry, TRUE, 0); 2548 if (rv) { 2549 CLIP_CHECK_BACK(entry, save_start); 2550 for (;;) { 2551 KASSERT(entry->wired_count == 1, ("bad wired_count on entry")); 2552 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2553 entry->wired_count = 0; 2554 if (entry->end == save_end) 2555 break; 2556 entry = vm_map_rb_tree_RB_NEXT(entry); 2557 KASSERT(entry, 2558 ("bad entry clip during backout")); 2559 } 2560 end = save_start; /* unwire the rest */ 2561 break; 2562 } 2563 /* 2564 * note that even though the entry might have been 2565 * clipped, the USER_WIRED flag we set prevents 2566 * duplication so we do not have to do a 2567 * clip check. 2568 */ 2569 entry = vm_map_rb_tree_RB_NEXT(entry); 2570 } 2571 2572 /* 2573 * If we failed, fall through to the unwiring section to 2574 * unwire what we had wired so far. 'end' has already 2575 * been adjusted. 2576 */ 2577 if (rv) 2578 new_pageable = 1; 2579 2580 /* 2581 * start_entry might have been clipped if we unlocked the 2582 * map and blocked. No matter how clipped it has gotten 2583 * there should be a fragment that is on our start boundary. 2584 */ 2585 CLIP_CHECK_BACK(start_entry, start); 2586 } 2587 2588 /* 2589 * Deal with the unwiring case. 2590 */ 2591 if (new_pageable) { 2592 /* 2593 * This is the unwiring case. We must first ensure that the 2594 * range to be unwired is really wired down. We know there 2595 * are no holes. 2596 */ 2597 entry = start_entry; 2598 while (entry && entry->start < end) { 2599 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2600 rv = KERN_INVALID_ARGUMENT; 2601 goto done; 2602 } 2603 KASSERT(entry->wired_count != 0, 2604 ("wired count was 0 with USER_WIRED set! %p", 2605 entry)); 2606 entry = vm_map_rb_tree_RB_NEXT(entry); 2607 } 2608 2609 /* 2610 * Now decrement the wiring count for each region. If a region 2611 * becomes completely unwired, unwire its physical pages and 2612 * mappings. 2613 * 2614 * NOTE: This pass must restart from start_entry rather than 2615 * continue with the 'entry' left over from the validation 2616 * loop above; otherwise the loop below would never be 2617 * entered and the pages backing the entries would be 2618 * leaked in the wired state. 2619 */ 2624 entry = start_entry; 2625 while (entry && entry->start < end) { 2626 KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, 2627 ("expected USER_WIRED on entry %p", entry)); 2628 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2629 entry->wired_count--; 2630 if (entry->wired_count == 0) 2631 vm_fault_unwire(map, entry); 2632 entry = vm_map_rb_tree_RB_NEXT(entry); 2633 } 2634 } 2635 done: 2636 vm_map_unclip_range(map, start_entry, start, real_end, &count, 2637 MAP_CLIP_NO_HOLES); 2638 vm_map_unlock(map); 2639 vm_map_entry_release(count); 2640 2641 return (rv); 2642 } 2643 2644 /* 2645 * Sets the pageability of the specified address range in the target map. 2646 * Regions specified as not pageable require locked-down physical 2647 * memory and physical page maps. 2648 * 2649 * The map must not be locked, but a reference must remain to the map 2650 * throughout the call. 2651 * 2652 * This function may be called via the zalloc path and must properly 2653 * reserve map entries for kernel_map. 2654 * 2655 * No requirements.
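 *
 * Note that the KM_PAGEABLE bit in kmflags selects unwiring rather
 * than wiring, and KM_KRESERVE selects the kernel reserve pool when
 * reserving and releasing map entries (see below).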
2656 */ 2657 int 2658 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags) 2659 { 2660 vm_map_entry_t entry; 2661 vm_map_entry_t start_entry; 2662 vm_offset_t end; 2663 int rv = KERN_SUCCESS; 2664 int count; 2665 2666 if (kmflags & KM_KRESERVE) 2667 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 2668 else 2669 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2670 vm_map_lock(map); 2671 VM_MAP_RANGE_CHECK(map, start, real_end); 2672 end = real_end; 2673 2674 start_entry = vm_map_clip_range(map, start, end, &count, 2675 MAP_CLIP_NO_HOLES); 2676 if (start_entry == NULL) { 2677 vm_map_unlock(map); 2678 rv = KERN_INVALID_ADDRESS; 2679 goto failure; 2680 } 2681 if ((kmflags & KM_PAGEABLE) == 0) { 2682 /* 2683 * Wiring. 2684 * 2685 * 1. Holding the write lock, we create any shadow or zero-fill 2686 * objects that need to be created. Then we clip each map 2687 * entry to the region to be wired and increment its wiring 2688 * count. We create objects before clipping the map entries 2689 * to avoid object proliferation. 2690 * 2691 * 2. We downgrade to a read lock, and call vm_fault_wire to 2692 * fault in the pages for any newly wired area (wired_count is 2693 * 1). 2694 * 2695 * Downgrading to a read lock for vm_fault_wire avoids a 2696 * possible deadlock with another process that may have faulted 2697 * on one of the pages to be wired (it would mark the page busy, 2698 * blocking us, then in turn block on the map lock that we 2699 * hold). Because of problems in the recursive lock package, 2700 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 2701 * any actions that require the write lock must be done 2702 * beforehand. Because we keep the read lock on the map, the 2703 * copy-on-write status of the entries we modify here cannot 2704 * change. 2705 */ 2706 entry = start_entry; 2707 while (entry && entry->start < end) { 2708 /* 2709 * Trivial case if the entry is already wired 2710 */ 2711 if (entry->wired_count) { 2712 entry->wired_count++; 2713 entry = vm_map_rb_tree_RB_NEXT(entry); 2714 continue; 2715 } 2716 2717 /* 2718 * The entry is being newly wired, we have to setup 2719 * appropriate management structures. A shadow 2720 * object is required for a copy-on-write region, 2721 * or a normal object for a zero-fill region. We 2722 * do not have to do this for entries that point to sub 2723 * maps because we won't hold the lock on the sub map. 2724 */ 2725 if (entry->maptype == VM_MAPTYPE_NORMAL || 2726 entry->maptype == VM_MAPTYPE_VPAGETABLE) { 2727 int copyflag = entry->eflags & 2728 MAP_ENTRY_NEEDS_COPY; 2729 if (copyflag && ((entry->protection & 2730 VM_PROT_WRITE) != 0)) { 2731 vm_map_entry_shadow(entry); 2732 } else if (entry->ba.object == NULL && 2733 !map->system_map) { 2734 vm_map_entry_allocate_object(entry); 2735 } 2736 } 2737 entry->wired_count++; 2738 entry = vm_map_rb_tree_RB_NEXT(entry); 2739 } 2740 2741 /* 2742 * Pass 2. 2743 */ 2744 2745 /* 2746 * HACK HACK HACK HACK 2747 * 2748 * vm_fault_wire() temporarily unlocks the map to avoid 2749 * deadlocks. The in-transition flag from vm_map_clip_range 2750 * call should protect us from changes while the map is 2751 * unlocked. T 2752 * 2753 * NOTE: Previously this comment stated that clipping might 2754 * still occur while the entry is unlocked, but from 2755 * what I can tell it actually cannot. 2756 * 2757 * It is unclear whether the CLIP_CHECK_*() calls 2758 * are still needed but we keep them in anyway. 
2759 * 2760 * HACK HACK HACK HACK 2761 */ 2762 2763 entry = start_entry; 2764 while (entry && entry->start < end) { 2765 /* 2766 * If vm_fault_wire fails for any page we need to undo 2767 * what has been done. We decrement the wiring count 2768 * for those pages which have not yet been wired (now) 2769 * and unwire those that have (later). 2770 */ 2771 vm_offset_t save_start = entry->start; 2772 vm_offset_t save_end = entry->end; 2773 2774 if (entry->wired_count == 1) 2775 rv = vm_fault_wire(map, entry, FALSE, kmflags); 2776 if (rv) { 2777 CLIP_CHECK_BACK(entry, save_start); 2778 for (;;) { 2779 KASSERT(entry->wired_count == 1, 2780 ("wired_count changed unexpectedly")); 2781 entry->wired_count = 0; 2782 if (entry->end == save_end) 2783 break; 2784 entry = vm_map_rb_tree_RB_NEXT(entry); 2785 KASSERT(entry, 2786 ("bad entry clip during backout")); 2787 } 2788 end = save_start; 2789 break; 2790 } 2791 CLIP_CHECK_FWD(entry, save_end); 2792 entry = vm_map_rb_tree_RB_NEXT(entry); 2793 } 2794 2795 /* 2796 * If a failure occured undo everything by falling through 2797 * to the unwiring code. 'end' has already been adjusted 2798 * appropriately. 2799 */ 2800 if (rv) 2801 kmflags |= KM_PAGEABLE; 2802 2803 /* 2804 * start_entry is still IN_TRANSITION but may have been 2805 * clipped since vm_fault_wire() unlocks and relocks the 2806 * map. No matter how clipped it has gotten there should 2807 * be a fragment that is on our start boundary. 2808 */ 2809 CLIP_CHECK_BACK(start_entry, start); 2810 } 2811 2812 if (kmflags & KM_PAGEABLE) { 2813 /* 2814 * This is the unwiring case. We must first ensure that the 2815 * range to be unwired is really wired down. We know there 2816 * are no holes. 2817 */ 2818 entry = start_entry; 2819 while (entry && entry->start < end) { 2820 if (entry->wired_count == 0) { 2821 rv = KERN_INVALID_ARGUMENT; 2822 goto done; 2823 } 2824 entry = vm_map_rb_tree_RB_NEXT(entry); 2825 } 2826 2827 /* 2828 * Now decrement the wiring count for each region. If a region 2829 * becomes completely unwired, unwire its physical pages and 2830 * mappings. 2831 */ 2832 entry = start_entry; 2833 while (entry && entry->start < end) { 2834 entry->wired_count--; 2835 if (entry->wired_count == 0) 2836 vm_fault_unwire(map, entry); 2837 entry = vm_map_rb_tree_RB_NEXT(entry); 2838 } 2839 } 2840 done: 2841 vm_map_unclip_range(map, start_entry, start, real_end, 2842 &count, MAP_CLIP_NO_HOLES); 2843 vm_map_unlock(map); 2844 failure: 2845 if (kmflags & KM_KRESERVE) 2846 vm_map_entry_krelease(count); 2847 else 2848 vm_map_entry_release(count); 2849 return (rv); 2850 } 2851 2852 /* 2853 * Mark a newly allocated address range as wired but do not fault in 2854 * the pages. The caller is expected to load the pages into the object. 2855 * 2856 * The map must be locked on entry and will remain locked on return. 2857 * No other requirements. 2858 */ 2859 void 2860 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, 2861 int *countp) 2862 { 2863 vm_map_entry_t scan; 2864 vm_map_entry_t entry; 2865 2866 entry = vm_map_clip_range(map, addr, addr + size, 2867 countp, MAP_CLIP_NO_HOLES); 2868 scan = entry; 2869 while (scan && scan->start < addr + size) { 2870 KKASSERT(scan->wired_count == 0); 2871 scan->wired_count = 1; 2872 scan = vm_map_rb_tree_RB_NEXT(scan); 2873 } 2874 vm_map_unclip_range(map, entry, addr, addr + size, 2875 countp, MAP_CLIP_NO_HOLES); 2876 } 2877 2878 /* 2879 * Push any dirty cached pages in the address range to their pager. 
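 * (When invoked on behalf of sys_msync(), MS_SYNC is typically
 * reflected here as syncio == TRUE and MS_INVALIDATE as
 * invalidate == TRUE.)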
2880 * If syncio is TRUE, dirty pages are written synchronously. 2881 * If invalidate is TRUE, any cached pages are freed as well. 2882 * 2883 * This routine is called by sys_msync() 2884 * 2885 * Returns an error if any part of the specified range is not mapped. 2886 * 2887 * No requirements. 2888 */ 2889 int 2890 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, 2891 boolean_t syncio, boolean_t invalidate) 2892 { 2893 vm_map_entry_t current; 2894 vm_map_entry_t next; 2895 vm_map_entry_t entry; 2896 vm_map_backing_t ba; 2897 vm_size_t size; 2898 vm_object_t object; 2899 vm_ooffset_t offset; 2900 2901 vm_map_lock_read(map); 2902 VM_MAP_RANGE_CHECK(map, start, end); 2903 if (!vm_map_lookup_entry(map, start, &entry)) { 2904 vm_map_unlock_read(map); 2905 return (KERN_INVALID_ADDRESS); 2906 } 2907 lwkt_gettoken(&map->token); 2908 2909 /* 2910 * Make a first pass to check for holes. 2911 */ 2912 current = entry; 2913 while (current && current->start < end) { 2914 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2915 lwkt_reltoken(&map->token); 2916 vm_map_unlock_read(map); 2917 return (KERN_INVALID_ARGUMENT); 2918 } 2919 next = vm_map_rb_tree_RB_NEXT(current); 2920 if (end > current->end && 2921 (next == NULL || 2922 current->end != next->start)) { 2923 lwkt_reltoken(&map->token); 2924 vm_map_unlock_read(map); 2925 return (KERN_INVALID_ADDRESS); 2926 } 2927 current = next; 2928 } 2929 2930 if (invalidate) 2931 pmap_remove(vm_map_pmap(map), start, end); 2932 2933 /* 2934 * Make a second pass, cleaning/uncaching pages from the indicated 2935 * objects as we go. 2936 */ 2937 current = entry; 2938 while (current && current->start < end) { 2939 offset = current->ba.offset + (start - current->start); 2940 size = (end <= current->end ? end : current->end) - start; 2941 2942 switch(current->maptype) { 2943 case VM_MAPTYPE_SUBMAP: 2944 { 2945 vm_map_t smap; 2946 vm_map_entry_t tentry; 2947 vm_size_t tsize; 2948 2949 smap = current->ba.sub_map; 2950 vm_map_lock_read(smap); 2951 vm_map_lookup_entry(smap, offset, &tentry); 2952 if (tentry == NULL) { 2953 tsize = vm_map_max(smap) - offset; 2954 ba = NULL; 2955 offset = 0 + (offset - vm_map_min(smap)); 2956 } else { 2957 tsize = tentry->end - offset; 2958 ba = &tentry->ba; 2959 offset = tentry->ba.offset + 2960 (offset - tentry->start); 2961 } 2962 vm_map_unlock_read(smap); 2963 if (tsize < size) 2964 size = tsize; 2965 break; 2966 } 2967 case VM_MAPTYPE_NORMAL: 2968 case VM_MAPTYPE_VPAGETABLE: 2969 ba = &current->ba; 2970 break; 2971 default: 2972 ba = NULL; 2973 break; 2974 } 2975 if (ba) { 2976 object = ba->object; 2977 if (object) 2978 vm_object_hold(object); 2979 } else { 2980 object = NULL; 2981 } 2982 2983 /* 2984 * Note that there is absolutely no sense in writing out 2985 * anonymous objects, so we track down the vnode object 2986 * to write out. 2987 * We invalidate (remove) all pages from the address space 2988 * anyway, for semantic correctness. 2989 * 2990 * note: certain anonymous maps, such as MAP_NOSYNC maps, 2991 * may start out with a NULL object. 2992 * 2993 * XXX do we really want to stop at the first backing store 2994 * here if there are more?
XXX 2995 */ 2996 if (ba) { 2997 vm_object_t tobj; 2998 2999 tobj = object; 3000 while (ba->backing_ba != NULL) { 3001 ba = ba->backing_ba; 3002 offset += ba->offset; 3003 tobj = ba->object; 3004 if (tobj->size < OFF_TO_IDX(offset + size)) 3005 size = IDX_TO_OFF(tobj->size) - offset; 3006 break; /* XXX this break is not correct */ 3007 } 3008 if (object != tobj) { 3009 if (object) 3010 vm_object_drop(object); 3011 object = tobj; 3012 vm_object_hold(object); 3013 } 3014 } 3015 3016 if (object && (object->type == OBJT_VNODE) && 3017 (current->protection & VM_PROT_WRITE) && 3018 (object->flags & OBJ_NOMSYNC) == 0) { 3019 /* 3020 * Flush pages if writing is allowed, invalidate them 3021 * if invalidation requested. Pages undergoing I/O 3022 * will be ignored by vm_object_page_remove(). 3023 * 3024 * We cannot lock the vnode and then wait for paging 3025 * to complete without deadlocking against vm_fault. 3026 * Instead we simply call vm_object_page_remove() and 3027 * allow it to block internally on a page-by-page 3028 * basis when it encounters pages undergoing async 3029 * I/O. 3030 */ 3031 int flags; 3032 3033 /* no chain wait needed for vnode objects */ 3034 vm_object_reference_locked(object); 3035 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY); 3036 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 3037 flags |= invalidate ? OBJPC_INVAL : 0; 3038 3039 /* 3040 * When operating on a virtual page table just 3041 * flush the whole object. XXX we probably ought 3042 * to 3043 */ 3044 switch(current->maptype) { 3045 case VM_MAPTYPE_NORMAL: 3046 vm_object_page_clean(object, 3047 OFF_TO_IDX(offset), 3048 OFF_TO_IDX(offset + size + PAGE_MASK), 3049 flags); 3050 break; 3051 case VM_MAPTYPE_VPAGETABLE: 3052 vm_object_page_clean(object, 0, 0, flags); 3053 break; 3054 } 3055 vn_unlock(((struct vnode *)object->handle)); 3056 vm_object_deallocate_locked(object); 3057 } 3058 if (object && invalidate && 3059 ((object->type == OBJT_VNODE) || 3060 (object->type == OBJT_DEVICE) || 3061 (object->type == OBJT_MGTDEVICE))) { 3062 int clean_only = 3063 ((object->type == OBJT_DEVICE) || 3064 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE; 3065 /* no chain wait needed for vnode/device objects */ 3066 vm_object_reference_locked(object); 3067 switch(current->maptype) { 3068 case VM_MAPTYPE_NORMAL: 3069 vm_object_page_remove(object, 3070 OFF_TO_IDX(offset), 3071 OFF_TO_IDX(offset + size + PAGE_MASK), 3072 clean_only); 3073 break; 3074 case VM_MAPTYPE_VPAGETABLE: 3075 vm_object_page_remove(object, 0, 0, clean_only); 3076 break; 3077 } 3078 vm_object_deallocate_locked(object); 3079 } 3080 start += size; 3081 if (object) 3082 vm_object_drop(object); 3083 current = vm_map_rb_tree_RB_NEXT(current); 3084 } 3085 3086 lwkt_reltoken(&map->token); 3087 vm_map_unlock_read(map); 3088 3089 return (KERN_SUCCESS); 3090 } 3091 3092 /* 3093 * Make the region specified by this entry pageable. 3094 * 3095 * The vm_map must be exclusively locked. 3096 */ 3097 static void 3098 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3099 { 3100 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3101 entry->wired_count = 0; 3102 vm_fault_unwire(map, entry); 3103 } 3104 3105 /* 3106 * Deallocate the given entry from the target map. 3107 * 3108 * The vm_map must be exclusively locked. 
3109 */ 3110 static void 3111 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp) 3112 { 3113 vm_map_entry_unlink(map, entry); 3114 map->size -= entry->end - entry->start; 3115 vm_map_entry_dispose(map, entry, countp); 3116 } 3117 3118 /* 3119 * Deallocates the given address range from the target map. 3120 * 3121 * The vm_map must be exclusively locked. 3122 */ 3123 int 3124 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp) 3125 { 3126 vm_object_t object; 3127 vm_map_entry_t entry; 3128 vm_map_entry_t first_entry; 3129 vm_offset_t hole_start; 3130 3131 ASSERT_VM_MAP_LOCKED(map); 3132 lwkt_gettoken(&map->token); 3133 again: 3134 /* 3135 * Find the start of the region, and clip it. Set entry to point 3136 * at the first record containing the requested address or, if no 3137 * such record exists, the next record with a greater address. The 3138 * loop will run from this point until a record beyond the termination 3139 * address is encountered. 3140 * 3141 * Adjust freehint[] for either the clip case or the extension case. 3142 * 3143 * GGG see other GGG comment. 3144 */ 3145 if (vm_map_lookup_entry(map, start, &first_entry)) { 3146 entry = first_entry; 3147 vm_map_clip_start(map, entry, start, countp); 3148 hole_start = start; 3149 } else { 3150 if (first_entry) { 3151 entry = vm_map_rb_tree_RB_NEXT(first_entry); 3152 if (entry == NULL) 3153 hole_start = first_entry->start; 3154 else 3155 hole_start = first_entry->end; 3156 } else { 3157 entry = RB_MIN(vm_map_rb_tree, &map->rb_root); 3158 if (entry == NULL) 3159 hole_start = vm_map_min(map); 3160 else 3161 hole_start = vm_map_max(map); 3162 } 3163 } 3164 3165 /* 3166 * Step through all entries in this region 3167 */ 3168 while (entry && entry->start < end) { 3169 vm_map_entry_t next; 3170 vm_offset_t s, e; 3171 vm_pindex_t offidxstart, offidxend, count; 3172 3173 /* 3174 * If we hit an in-transition entry we have to sleep and 3175 * retry. It's easier (and not really slower) to just retry 3176 * since this case occurs so rarely and the hint is already 3177 * pointing at the right place. We have to reset the 3178 * start offset so as not to accidently delete an entry 3179 * another process just created in vacated space. 3180 */ 3181 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3182 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3183 start = entry->start; 3184 ++mycpu->gd_cnt.v_intrans_coll; 3185 ++mycpu->gd_cnt.v_intrans_wait; 3186 vm_map_transition_wait(map, 1); 3187 goto again; 3188 } 3189 vm_map_clip_end(map, entry, end, countp); 3190 3191 s = entry->start; 3192 e = entry->end; 3193 next = vm_map_rb_tree_RB_NEXT(entry); 3194 3195 offidxstart = OFF_TO_IDX(entry->ba.offset); 3196 count = OFF_TO_IDX(e - s); 3197 3198 switch(entry->maptype) { 3199 case VM_MAPTYPE_NORMAL: 3200 case VM_MAPTYPE_VPAGETABLE: 3201 case VM_MAPTYPE_SUBMAP: 3202 object = entry->ba.object; 3203 break; 3204 default: 3205 object = NULL; 3206 break; 3207 } 3208 3209 /* 3210 * Unwire before removing addresses from the pmap; otherwise, 3211 * unwiring will put the entries back in the pmap. 3212 * 3213 * Generally speaking, doing a bulk pmap_remove() before 3214 * removing the pages from the VM object is better at 3215 * reducing unnecessary IPIs. The pmap code is now optimized 3216 * to not blindly iterate the range when pt and pd pages 3217 * are missing. 
3218 */ 3219 if (entry->wired_count != 0) 3220 vm_map_entry_unwire(map, entry); 3221 3222 offidxend = offidxstart + count; 3223 3224 if (object == &kernel_object) { 3225 pmap_remove(map->pmap, s, e); 3226 vm_object_hold(object); 3227 vm_object_page_remove(object, offidxstart, 3228 offidxend, FALSE); 3229 vm_object_drop(object); 3230 } else if (object && object->type != OBJT_DEFAULT && 3231 object->type != OBJT_SWAP) { 3232 /* 3233 * vnode object routines cannot be chain-locked, 3234 * but since we aren't removing pages from the 3235 * object here we can use a shared hold. 3236 */ 3237 vm_object_hold_shared(object); 3238 pmap_remove(map->pmap, s, e); 3239 vm_object_drop(object); 3240 } else if (object) { 3241 vm_object_hold(object); 3242 pmap_remove(map->pmap, s, e); 3243 3244 if (object != NULL && 3245 object->ref_count != 1 && 3246 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == 3247 OBJ_ONEMAPPING && 3248 (object->type == OBJT_DEFAULT || 3249 object->type == OBJT_SWAP)) { 3250 /* 3251 * When ONEMAPPING is set we can destroy the 3252 * pages underlying the entry's range. 3253 */ 3254 vm_object_page_remove(object, offidxstart, 3255 offidxend, FALSE); 3256 if (object->type == OBJT_SWAP) { 3257 swap_pager_freespace(object, 3258 offidxstart, 3259 count); 3260 } 3261 if (offidxend >= object->size && 3262 offidxstart < object->size) { 3263 object->size = offidxstart; 3264 } 3265 } 3266 vm_object_drop(object); 3267 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) { 3268 pmap_remove(map->pmap, s, e); 3269 } 3270 3271 /* 3272 * Delete the entry (which may delete the object) only after 3273 * removing all pmap entries pointing to its pages. 3274 * (Otherwise, its page frames may be reallocated, and any 3275 * modify bits will be set in the wrong object!) 3276 */ 3277 vm_map_entry_delete(map, entry, countp); 3278 entry = next; 3279 } 3280 3281 /* 3282 * We either reached the end and use vm_map_max as the end 3283 * address, or we didn't and we use the next entry as the 3284 * end address. 3285 */ 3286 if (entry == NULL) { 3287 vm_map_freehint_hole(map, hole_start, 3288 vm_map_max(map) - hole_start); 3289 } else { 3290 vm_map_freehint_hole(map, hole_start, 3291 entry->start - hole_start); 3292 } 3293 3294 lwkt_reltoken(&map->token); 3295 3296 return (KERN_SUCCESS); 3297 } 3298 3299 /* 3300 * Remove the given address range from the target map. 3301 * This is the exported form of vm_map_delete. 3302 * 3303 * No requirements. 3304 */ 3305 int 3306 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3307 { 3308 int result; 3309 int count; 3310 3311 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3312 vm_map_lock(map); 3313 VM_MAP_RANGE_CHECK(map, start, end); 3314 result = vm_map_delete(map, start, end, &count); 3315 vm_map_unlock(map); 3316 vm_map_entry_release(count); 3317 3318 return (result); 3319 } 3320 3321 /* 3322 * Assert that the target map allows the specified privilege on the 3323 * entire address region given. The entire region must be allocated. 3324 * 3325 * The caller must specify whether the vm_map is already locked or not. 
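 *
 * A minimal usage sketch (hypothetical caller, illustrative only):
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *		return (EFAULT);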
3326 */ 3327 boolean_t 3328 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3329 vm_prot_t protection, boolean_t have_lock) 3330 { 3331 vm_map_entry_t entry; 3332 vm_map_entry_t tmp_entry; 3333 boolean_t result; 3334 3335 if (have_lock == FALSE) 3336 vm_map_lock_read(map); 3337 3338 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 3339 if (have_lock == FALSE) 3340 vm_map_unlock_read(map); 3341 return (FALSE); 3342 } 3343 entry = tmp_entry; 3344 3345 result = TRUE; 3346 while (start < end) { 3347 if (entry == NULL) { 3348 result = FALSE; 3349 break; 3350 } 3351 3352 /* 3353 * No holes allowed! 3354 */ 3355 3356 if (start < entry->start) { 3357 result = FALSE; 3358 break; 3359 } 3360 /* 3361 * Check protection associated with entry. 3362 */ 3363 3364 if ((entry->protection & protection) != protection) { 3365 result = FALSE; 3366 break; 3367 } 3368 /* go to next entry */ 3369 start = entry->end; 3370 entry = vm_map_rb_tree_RB_NEXT(entry); 3371 } 3372 if (have_lock == FALSE) 3373 vm_map_unlock_read(map); 3374 return (result); 3375 } 3376 3377 /* 3378 * vm_map_backing structures are not shared across forks. 3379 * 3380 * MAP_BACK_CLIPPED - Called as part of a clipping replication. 3381 * Do not clear OBJ_ONEMAPPING. 3382 * 3383 * MAP_BACK_BASEOBJREFD - Called from vm_map_insert(). The base object 3384 * has already been referenced. 3385 */ 3386 static 3387 void 3388 vm_map_backing_replicated(vm_map_t map, vm_map_entry_t entry, int flags) 3389 { 3390 vm_map_backing_t ba; 3391 vm_map_backing_t nba; 3392 vm_object_t object; 3393 3394 ba = &entry->ba; 3395 for (;;) { 3396 object = ba->object; 3397 ba->base_entry = entry; 3398 ba->refs = 1; 3399 if (object && 3400 (entry->maptype == VM_MAPTYPE_VPAGETABLE || 3401 entry->maptype == VM_MAPTYPE_NORMAL)) { 3402 if (ba != &entry->ba || 3403 (flags & MAP_BACK_BASEOBJREFD) == 0) { 3404 vm_object_reference_quick(object); 3405 } 3406 vm_map_backing_attach(ba); 3407 if ((flags & MAP_BACK_CLIPPED) == 0 && 3408 object->ref_count > 1) { 3409 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3410 } 3411 } 3412 if (ba->backing_ba == NULL) 3413 break; 3414 nba = kmalloc(sizeof(*nba), M_MAP_BACKING, M_INTWAIT); 3415 *nba = *ba->backing_ba; 3416 ba->backing_ba = nba; 3417 ba = nba; 3418 } 3419 entry->ba.refs = 0; /* base entry refs is 0 */ 3420 } 3421 3422 /* 3423 * Handles the dirty work of making src_entry and dst_entry copy-on-write 3424 * after src_entry has been cloned to dst_entry. For normal entries only. 3425 * 3426 * The vm_maps must be exclusively locked. 3427 * The vm_map's token must be held. 3428 * 3429 * Because the maps are locked no faults can be in progress during the 3430 * operation. 3431 */ 3432 static void 3433 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map, 3434 vm_map_entry_t src_entry, vm_map_entry_t dst_entry) 3435 { 3436 vm_object_t obj; 3437 3438 KKASSERT(dst_entry->maptype == VM_MAPTYPE_NORMAL || 3439 dst_entry->maptype == VM_MAPTYPE_VPAGETABLE); 3440 3441 if (src_entry->wired_count && 3442 src_entry->maptype != VM_MAPTYPE_VPAGETABLE) { 3443 /* 3444 * Of course, wired down pages can't be set copy-on-write. 3445 * Cause wired pages to be copied into the new map by 3446 * simulating faults (the new pages are pageable) 3447 * 3448 * Scrap ba.object (its ref-count has not yet been adjusted 3449 * so we can just NULL out the field). Remove the backing 3450 * store. 3451 * 3452 * Then call vm_fault_copy_entry() to create a new object 3453 * in dst_entry and copy the wired pages from src to dst. 
3454 * 3455 * The fault-copy code doesn't work with virtual page 3456 * tables. 3457 */ 3458 if ((obj = dst_entry->ba.object) != NULL) { 3459 vm_map_backing_detach(&dst_entry->ba); 3460 dst_entry->ba.object = NULL; 3461 vm_map_entry_dispose_ba(dst_entry->ba.backing_ba); 3462 dst_entry->ba.backing_ba = NULL; 3463 dst_entry->ba.backing_count = 0; 3464 } 3465 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 3466 } else { 3467 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 3468 /* 3469 * If the source entry is not already marked NEEDS_COPY 3470 * we need to write-protect the PTEs. 3471 */ 3472 pmap_protect(src_map->pmap, 3473 src_entry->start, 3474 src_entry->end, 3475 src_entry->protection & ~VM_PROT_WRITE); 3476 } 3477 3478 /* 3479 * dst_entry.ba_object might be stale. Update it (its 3480 * ref-count has not yet been updated so just overwrite 3481 * the field). 3482 * 3483 * If there is no object then we are golden. Also, in 3484 * this situation if there are no backing_ba linkages then 3485 * we can set ba.offset to 0 for debugging convenience. 3486 * 3487 * ba.offset cannot otherwise be modified because it effects 3488 * the offsets for the entire backing_ba chain. 3489 */ 3490 obj = src_entry->ba.object; 3491 3492 if (obj) { 3493 src_entry->eflags |= (MAP_ENTRY_COW | 3494 MAP_ENTRY_NEEDS_COPY); 3495 dst_entry->eflags |= (MAP_ENTRY_COW | 3496 MAP_ENTRY_NEEDS_COPY); 3497 KKASSERT(dst_entry->ba.offset == src_entry->ba.offset); 3498 } else { 3499 if (dst_entry->ba.backing_ba == NULL) 3500 dst_entry->ba.offset = 0; 3501 } 3502 3503 /* 3504 * Normal, allow the backing_ba link depth to 3505 * increase. 3506 */ 3507 pmap_copy(dst_map->pmap, src_map->pmap, 3508 dst_entry->start, 3509 dst_entry->end - dst_entry->start, 3510 src_entry->start); 3511 } 3512 } 3513 3514 /* 3515 * Create a vmspace for a new process and its related vm_map based on an 3516 * existing vmspace. The new map inherits information from the old map 3517 * according to inheritance settings. 3518 * 3519 * The source map must not be locked. 3520 * No requirements. 3521 */ 3522 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3523 vm_map_entry_t old_entry, int *countp); 3524 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3525 vm_map_entry_t old_entry, int *countp); 3526 3527 struct vmspace * 3528 vmspace_fork(struct vmspace *vm1) 3529 { 3530 struct vmspace *vm2; 3531 vm_map_t old_map = &vm1->vm_map; 3532 vm_map_t new_map; 3533 vm_map_entry_t old_entry; 3534 int count; 3535 3536 lwkt_gettoken(&vm1->vm_map.token); 3537 vm_map_lock(old_map); 3538 3539 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map)); 3540 lwkt_gettoken(&vm2->vm_map.token); 3541 3542 /* 3543 * We must bump the timestamp to force any concurrent fault 3544 * to retry. 
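 * (The fault code samples map->timestamp around its lookups and
 * retries when the value changes.)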
3545 */ 3546 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 3547 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy); 3548 new_map = &vm2->vm_map; /* XXX */ 3549 new_map->timestamp = 1; 3550 3551 vm_map_lock(new_map); 3552 3553 count = old_map->nentries; 3554 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT); 3555 3556 RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) { 3557 switch(old_entry->maptype) { 3558 case VM_MAPTYPE_SUBMAP: 3559 panic("vm_map_fork: encountered a submap"); 3560 break; 3561 case VM_MAPTYPE_UKSMAP: 3562 vmspace_fork_uksmap_entry(old_map, new_map, 3563 old_entry, &count); 3564 break; 3565 case VM_MAPTYPE_NORMAL: 3566 case VM_MAPTYPE_VPAGETABLE: 3567 vmspace_fork_normal_entry(old_map, new_map, 3568 old_entry, &count); 3569 break; 3570 } 3571 } 3572 3573 new_map->size = old_map->size; 3574 vm_map_unlock(new_map); 3575 vm_map_unlock(old_map); 3576 vm_map_entry_release(count); 3577 3578 lwkt_reltoken(&vm2->vm_map.token); 3579 lwkt_reltoken(&vm1->vm_map.token); 3580 3581 return (vm2); 3582 } 3583 3584 static 3585 void 3586 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map, 3587 vm_map_entry_t old_entry, int *countp) 3588 { 3589 vm_map_entry_t new_entry; 3590 vm_map_backing_t ba; 3591 vm_object_t object; 3592 3593 /* 3594 * If the backing_ba link list gets too long then fault it 3595 * all into the head object and dispose of the list. We do 3596 * this in old_entry prior to cloning in order to benefit both 3597 * parent and child. 3598 * 3599 * We can test our fronting object's size against its 3600 * resident_page_count for a really cheap (but probably not perfect) 3601 * all-shadowed test, allowing us to disconnect the backing_ba 3602 * link list early. 3603 * 3604 * XXX Currently doesn't work for VPAGETABLEs (the entire object 3605 * would have to be copied). 3606 */ 3607 object = old_entry->ba.object; 3608 if (old_entry->ba.backing_ba && 3609 old_entry->maptype != VM_MAPTYPE_VPAGETABLE && 3610 (old_entry->ba.backing_count >= vm_map_backing_limit || 3611 (vm_map_backing_shadow_test && object && 3612 object->size == object->resident_page_count))) { 3613 /* 3614 * If there are too many backing_ba linkages we 3615 * collapse everything into the head 3616 * 3617 * This will also remove all the pte's. 3618 */ 3619 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) 3620 vm_map_entry_shadow(old_entry); 3621 if (object == NULL) 3622 vm_map_entry_allocate_object(old_entry); 3623 if (vm_fault_collapse(old_map, old_entry) == KERN_SUCCESS) { 3624 ba = old_entry->ba.backing_ba; 3625 old_entry->ba.backing_ba = NULL; 3626 old_entry->ba.backing_count = 0; 3627 vm_map_entry_dispose_ba(ba); 3628 } 3629 } 3630 object = NULL; /* object variable is now invalid */ 3631 3632 /* 3633 * Fork the entry 3634 */ 3635 switch (old_entry->inheritance) { 3636 case VM_INHERIT_NONE: 3637 break; 3638 case VM_INHERIT_SHARE: 3639 /* 3640 * Clone the entry as a shared entry. This will look like 3641 * shared memory across the old and the new process. We must 3642 * ensure that the object is allocated. 3643 */ 3644 if (old_entry->ba.object == NULL) 3645 vm_map_entry_allocate_object(old_entry); 3646 3647 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3648 /* 3649 * Create the fronting vm_map_backing for 3650 * an entry which needs a copy, plus an extra 3651 * ref because we are going to duplicate it 3652 * in the fork. 3653 * 3654 * The call to vm_map_entry_shadow() will also clear 3655 * OBJ_ONEMAPPING. 3656 * 3657 * XXX no more collapse. Still need extra ref 3658 * for the fork. 
3659 */ 3660 vm_map_entry_shadow(old_entry); 3661 } else if (old_entry->ba.object) { 3662 object = old_entry->ba.object; 3663 } 3664 3665 /* 3666 * Clone the entry. We've already bumped the ref on 3667 * the vm_object for our new entry. 3668 */ 3669 new_entry = vm_map_entry_create(countp); 3670 *new_entry = *old_entry; 3671 3672 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3673 new_entry->wired_count = 0; 3674 new_entry->map = new_map; 3675 3676 /* 3677 * Replicate and index the vm_map_backing. Don't share 3678 * the vm_map_backing across vm_map's (only across clips). 3679 * 3680 * Insert the entry into the new map -- we know we're 3681 * inserting at the end of the new map. 3682 */ 3683 vm_map_backing_replicated(new_map, new_entry, 0); 3684 vm_map_entry_link(new_map, new_entry); 3685 3686 /* 3687 * Update the physical map 3688 */ 3689 pmap_copy(new_map->pmap, old_map->pmap, 3690 new_entry->start, 3691 (old_entry->end - old_entry->start), 3692 old_entry->start); 3693 break; 3694 case VM_INHERIT_COPY: 3695 /* 3696 * Clone the entry and link the copy into the new map. 3697 * 3698 * Note that ref-counting adjustment for old_entry->ba.object 3699 * (if it isn't a special map that is) is handled by 3700 * vm_map_copy_entry(). 3701 */ 3702 new_entry = vm_map_entry_create(countp); 3703 *new_entry = *old_entry; 3704 3705 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3706 new_entry->wired_count = 0; 3707 new_entry->map = new_map; 3708 3709 vm_map_backing_replicated(new_map, new_entry, 0); 3710 vm_map_entry_link(new_map, new_entry); 3711 3712 /* 3713 * This does the actual dirty work of making both entries 3714 * copy-on-write, and will also handle the fronting object. 3715 */ 3716 vm_map_copy_entry(old_map, new_map, old_entry, new_entry); 3717 break; 3718 } 3719 } 3720 3721 /* 3722 * When forking user-kernel shared maps, the map might change in the 3723 * child so do not try to copy the underlying pmap entries. 3724 */ 3725 static 3726 void 3727 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map, 3728 vm_map_entry_t old_entry, int *countp) 3729 { 3730 vm_map_entry_t new_entry; 3731 3732 new_entry = vm_map_entry_create(countp); 3733 *new_entry = *old_entry; 3734 3735 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3736 new_entry->wired_count = 0; 3737 new_entry->map = new_map; 3738 KKASSERT(new_entry->ba.backing_ba == NULL); 3739 vm_map_backing_replicated(new_map, new_entry, 0); 3740 3741 vm_map_entry_link(new_map, new_entry); 3742 } 3743 3744 /* 3745 * Create an auto-grow stack entry 3746 * 3747 * No requirements. 
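 *
 * Only init_ssize bytes are mapped up front, at the top of the
 * max_ssize reservation; the remaining headroom is recorded in
 * aux.avail_ssize and is consumed later by vm_map_growstack().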
3748 */ 3749 int 3750 vm_map_stack (vm_map_t map, vm_offset_t *addrbos, vm_size_t max_ssize, 3751 int flags, vm_prot_t prot, vm_prot_t max, int cow) 3752 { 3753 vm_map_entry_t prev_entry; 3754 vm_map_entry_t next; 3755 vm_size_t init_ssize; 3756 int rv; 3757 int count; 3758 vm_offset_t tmpaddr; 3759 3760 cow |= MAP_IS_STACK; 3761 3762 if (max_ssize < sgrowsiz) 3763 init_ssize = max_ssize; 3764 else 3765 init_ssize = sgrowsiz; 3766 3767 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3768 vm_map_lock(map); 3769 3770 /* 3771 * Find space for the mapping 3772 */ 3773 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 3774 if (vm_map_findspace(map, *addrbos, max_ssize, 1, 3775 flags, &tmpaddr)) { 3776 vm_map_unlock(map); 3777 vm_map_entry_release(count); 3778 return (KERN_NO_SPACE); 3779 } 3780 *addrbos = tmpaddr; 3781 } 3782 3783 /* If addr is already mapped, no go */ 3784 if (vm_map_lookup_entry(map, *addrbos, &prev_entry)) { 3785 vm_map_unlock(map); 3786 vm_map_entry_release(count); 3787 return (KERN_NO_SPACE); 3788 } 3789 3790 #if 0 3791 /* XXX already handled by kern_mmap() */ 3792 /* If we would blow our VMEM resource limit, no go */ 3793 if (map->size + init_ssize > 3794 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3795 vm_map_unlock(map); 3796 vm_map_entry_release(count); 3797 return (KERN_NO_SPACE); 3798 } 3799 #endif 3800 3801 /* 3802 * If we can't accomodate max_ssize in the current mapping, 3803 * no go. However, we need to be aware that subsequent user 3804 * mappings might map into the space we have reserved for 3805 * stack, and currently this space is not protected. 3806 * 3807 * Hopefully we will at least detect this condition 3808 * when we try to grow the stack. 3809 */ 3810 if (prev_entry) 3811 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3812 else 3813 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3814 3815 if (next && next->start < *addrbos + max_ssize) { 3816 vm_map_unlock(map); 3817 vm_map_entry_release(count); 3818 return (KERN_NO_SPACE); 3819 } 3820 3821 /* 3822 * We initially map a stack of only init_ssize. We will 3823 * grow as needed later. Since this is to be a grow 3824 * down stack, we map at the top of the range. 3825 * 3826 * Note: we would normally expect prot and max to be 3827 * VM_PROT_ALL, and cow to be 0. Possibly we should 3828 * eliminate these as input parameters, and just 3829 * pass these values here in the insert call. 3830 */ 3831 rv = vm_map_insert(map, &count, NULL, NULL, 3832 0, *addrbos + max_ssize - init_ssize, 3833 *addrbos + max_ssize, 3834 VM_MAPTYPE_NORMAL, 3835 VM_SUBSYS_STACK, prot, max, cow); 3836 3837 /* Now set the avail_ssize amount */ 3838 if (rv == KERN_SUCCESS) { 3839 if (prev_entry) 3840 next = vm_map_rb_tree_RB_NEXT(prev_entry); 3841 else 3842 next = RB_MIN(vm_map_rb_tree, &map->rb_root); 3843 if (prev_entry != NULL) { 3844 vm_map_clip_end(map, 3845 prev_entry, 3846 *addrbos + max_ssize - init_ssize, 3847 &count); 3848 } 3849 if (next->end != *addrbos + max_ssize || 3850 next->start != *addrbos + max_ssize - init_ssize){ 3851 panic ("Bad entry start/end for new stack entry"); 3852 } else { 3853 next->aux.avail_ssize = max_ssize - init_ssize; 3854 } 3855 } 3856 3857 vm_map_unlock(map); 3858 vm_map_entry_release(count); 3859 return (rv); 3860 } 3861 3862 /* 3863 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3864 * desired address is already mapped, or if we successfully grow 3865 * the stack. 
/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 *
 * No requirements.
 */
int
vm_map_growstack (vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t stack_entry;
	vm_map_entry_t next;
	struct vmspace *vm;
	struct lwp *lp;
	struct proc *p;
	vm_offset_t end;
	int grow_amount;
	int rv = KERN_SUCCESS;
	int is_procstack;
	int use_read_lock = 1;
	int count;

	/*
	 * Find the vm
	 */
	lp = curthread->td_lwp;
	p = curthread->td_proc;
	KKASSERT(lp != NULL);
	vm = lp->lwp_vmspace;

	/*
	 * Growstack is only allowed on the current process.  We disallow
	 * other use cases, e.g. trying to access memory via procfs that
	 * the stack hasn't grown into.
	 */
	if (map != &vm->vm_map) {
		return KERN_FAILURE;
	}

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
Retry:
	if (use_read_lock)
		vm_map_lock_read(map);
	else
		vm_map_lock(map);

	/*
	 * If addr is already in the entry range, no need to grow.
	 * prev_entry returns NULL if addr is at the head.
	 */
	if (vm_map_lookup_entry(map, addr, &prev_entry))
		goto done;
	if (prev_entry)
		stack_entry = vm_map_rb_tree_RB_NEXT(prev_entry);
	else
		stack_entry = RB_MIN(vm_map_rb_tree, &map->rb_root);

	if (stack_entry == NULL)
		goto done;
	if (prev_entry == NULL)
		end = stack_entry->start - stack_entry->aux.avail_ssize;
	else
		end = prev_entry->end;

	/*
	 * This next test mimics the old grow function in vm_machdep.c.
	 * It really doesn't quite make sense, but we do it anyway
	 * for compatibility.
	 *
	 * If the stack is not growable, return success.  This signals
	 * the caller to proceed as it normally would with normal vm.
	 */
	if (stack_entry->aux.avail_ssize < 1 ||
	    addr >= stack_entry->start ||
	    addr < stack_entry->start - stack_entry->aux.avail_ssize) {
		goto done;
	}

	/* Find the minimum grow amount */
	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
	if (grow_amount > stack_entry->aux.avail_ssize) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	/*
	 * If there is no longer enough space between the entries, fail
	 * and adjust the available space.  Note: this should only happen
	 * if the user has mapped into the stack area after the stack was
	 * created, and is probably an error.
	 *
	 * This also effectively destroys any guard page the user
	 * might have intended by limiting the stack size.
	 */
	if (grow_amount > stack_entry->start - end) {
		if (use_read_lock && vm_map_lock_upgrade(map)) {
			/* lost lock */
			use_read_lock = 0;
			goto Retry;
		}
		use_read_lock = 0;
		stack_entry->aux.avail_ssize = stack_entry->start - end;
		rv = KERN_NO_SPACE;
		goto done;
	}

	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

	/* If this is the main process stack, see if we're over the
	 * stack limit.
	 */
	if (is_procstack && (vm->vm_ssize + grow_amount >
			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	/* Round the grow amount up to a multiple of sgrowsiz */
	grow_amount = roundup (grow_amount, sgrowsiz);
	if (grow_amount > stack_entry->aux.avail_ssize) {
		grow_amount = stack_entry->aux.avail_ssize;
	}
	if (is_procstack && (vm->vm_ssize + grow_amount >
			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - vm->vm_ssize;
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	if (use_read_lock && vm_map_lock_upgrade(map)) {
		/* lost lock */
		use_read_lock = 0;
		goto Retry;
	}
	use_read_lock = 0;

	/* Get the preliminary new entry start value */
	addr = stack_entry->start - grow_amount;

	/* If this puts us into the previous entry, cut back our growth
	 * to the available space.  Also, see the note above.
	 */
	if (addr < end) {
		stack_entry->aux.avail_ssize = stack_entry->start - end;
		addr = end;
	}

	rv = vm_map_insert(map, &count, NULL, NULL,
			   0, addr, stack_entry->start,
			   VM_MAPTYPE_NORMAL,
			   VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);

	/* Adjust the available stack space by the amount we grew. */
	if (rv == KERN_SUCCESS) {
		if (prev_entry) {
			vm_map_clip_end(map, prev_entry, addr, &count);
			next = vm_map_rb_tree_RB_NEXT(prev_entry);
		} else {
			next = RB_MIN(vm_map_rb_tree, &map->rb_root);
		}
		if (next->end != stack_entry->start ||
		    next->start != addr) {
			panic ("Bad stack grow start/end in new stack entry");
		} else {
			next->aux.avail_ssize =
				stack_entry->aux.avail_ssize -
				(next->end - next->start);
			if (is_procstack) {
				vm->vm_ssize += next->end -
						next->start;
			}
		}

		if (map->flags & MAP_WIREFUTURE)
			vm_map_unwire(map, next->start, next->end, FALSE);
	}

done:
	if (use_read_lock)
		vm_map_unlock_read(map);
	else
		vm_map_unlock(map);
	vm_map_entry_release(count);
	return (rv);
}

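/*
 * Worked example (hypothetical numbers, assuming PAGE_SIZE 4096 and
 * sgrowsiz 128KB): a fault 5000 bytes below stack_entry->start gives
 *
 *	grow_amount = roundup(5000, PAGE_SIZE) = 8192
 *	grow_amount = roundup(8192, sgrowsiz)  = 131072
 *
 * which is then clamped to aux.avail_ssize and to whatever RLIMIT_STACK
 * and RLIMIT_VMEM still permit, before the new entry is inserted at
 * [stack_entry->start - grow_amount, stack_entry->start).
 */
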
/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is null.
 *
 * No requirements.
 */
void
vmspace_exec(struct proc *p, struct vmspace *vmcopy)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_map_t map = &p->p_vmspace->vm_map;

	/*
	 * If we are execing a resident vmspace we fork it, otherwise
	 * we create a new vmspace.  Note that exitingcnt is not
	 * copied to the new vmspace.
	 */
	lwkt_gettoken(&oldvmspace->vm_map.token);
	if (vmcopy) {
		newvmspace = vmspace_fork(vmcopy);
		lwkt_gettoken(&newvmspace->vm_map.token);
	} else {
		newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map));
		lwkt_gettoken(&newvmspace->vm_map.token);
		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
		      (caddr_t)&oldvmspace->vm_endcopy -
		      (caddr_t)&oldvmspace->vm_startcopy);
	}

	/*
	 * Finish initializing the vmspace before assigning it
	 * to the process.  The vmspace will become the current vmspace
	 * if p == curproc.
	 */
	pmap_pinit2(vmspace_pmap(newvmspace));
	pmap_replacevm(p, newvmspace, 0);
	lwkt_reltoken(&newvmspace->vm_map.token);
	lwkt_reltoken(&oldvmspace->vm_map.token);
	vmspace_rel(oldvmspace);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
void
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	lwkt_gettoken(&oldvmspace->vm_map.token);
	if (vmspace_getrefs(oldvmspace) == 1) {
		lwkt_reltoken(&oldvmspace->vm_map.token);
		return;
	}
	newvmspace = vmspace_fork(oldvmspace);
	lwkt_gettoken(&newvmspace->vm_map.token);
	pmap_pinit2(vmspace_pmap(newvmspace));
	pmap_replacevm(p, newvmspace, 0);
	lwkt_reltoken(&newvmspace->vm_map.token);
	lwkt_reltoken(&oldvmspace->vm_map.token);
	vmspace_rel(oldvmspace);
}

/*
 * vm_map_hint: return the beginning of the best area suitable for
 * creating a new mapping with "prot" protection.
 *
 * No requirements.
 */
vm_offset_t
vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
{
	struct vmspace *vms = p->p_vmspace;
	struct rlimit limit;
	rlim_t dsiz;

	/*
	 * Acquire the datasize limit for the mmap() operation.
	 */
	if (kern_getrlimit(RLIMIT_DATA, &limit))
		limit.rlim_cur = maxdsiz;
	dsiz = limit.rlim_cur;

	if (!randomize_mmap || addr != 0) {
		/*
		 * Set a reasonable start point for the hint if it was
		 * not specified or if it falls within the heap space.
		 * Hinted mmap()s do not allocate out of the heap space.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		     addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) {
			addr = round_page((vm_offset_t)vms->vm_daddr + dsiz);
		}

		return addr;
	}

	/*
	 * randomize_mmap && addr == 0.  For now randomize the
	 * address within a dsiz range beyond the data limit.
	 */
	addr = (vm_offset_t)vms->vm_daddr + dsiz;
	if (dsiz)
		addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz;
	return (round_page(addr));
}

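/*
 * Worked example (hypothetical numbers): with randomize_mmap enabled,
 * addr == 0 and a 128MB RLIMIT_DATA, the hint above becomes
 *
 *	addr = vm_daddr + 128MB
 *	     + ((karc4random64() & 0x7FFFFFFFFFFFFFFF) % 128MB)
 *
 * i.e. a page-rounded address somewhere in the 128MB window immediately
 * beyond the reserved data space.
 */
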
/*
 * Finds the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified.
 *
 * Leaves the map in question locked for read; return values are guaranteed
 * until a vm_map_lookup_done call is performed.  Note that the map argument
 * is in/out; the returned map must be used in the call to vm_map_lookup_done.
 *
 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
 * that fast.
 *
 * If a lookup is requested with "write protection" specified, the map may
 * be changed to perform virtual copying operations, although the data
 * referenced will remain the same.
 *
 * No requirements.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      struct vm_map_backing **bap,	/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      int *wflags)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;
	int use_read_lock = 1;
	int rv = KERN_SUCCESS;
	int count;
	thread_t td = curthread;

	/*
	 * vm_map_entry_reserve() implements an important mitigation
	 * against mmap() spam running the kernel out of vm_map_entry
	 * structures, but it can also cause an infinite call recursion.
	 * Use td_nest_count to prevent an infinite recursion (allows
	 * the vm_map code to dig into the pcpu vm_map_entry reserve).
	 */
	count = 0;
	if (td->td_nest_count == 0) {
		++td->td_nest_count;
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		--td->td_nest_count;
	}
RetryLookup:
	if (use_read_lock)
		vm_map_lock_read(map);
	else
		vm_map_lock(map);

	/*
	 * Always do a full lookup.  The hint doesn't get us much anymore
	 * now that the map is RB'd.
	 */
	cpu_ccfence();
	*out_entry = NULL;
	*bap = NULL;

	{
		vm_map_entry_t tmp_entry;

		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps.
	 */
	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->ba.sub_map;
		if (use_read_lock)
			vm_map_unlock_read(old_map);
		else
			vm_map_unlock(old_map);
		use_read_lock = 1;
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW pages with an override.
	 * This is to implement a forced COW for debuggers.
	 */
	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;

	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
		rv = KERN_PROTECTION_FAILURE;
		goto done;
	}

	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		rv = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wflags = 0;
	if (entry->wired_count) {
		*wflags |= FW_WIRED;
		prot = fault_type = entry->protection;
	}

	/*
	 * Virtual page tables may need to update the accessed (A) bit
	 * in a page table entry.  Upgrade the fault to a write fault for
	 * that case if the map will support it.  If the map does not support
	 * it the page table entry simply will not be updated.
	 */
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		if (prot & VM_PROT_WRITE)
			fault_type |= VM_PROT_WRITE;
	}

	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((prot & VM_PROT_WRITE) == 0)
			fault_type |= VM_PROT_WRITE;
	}

	/*
	 * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
	 */
	if (entry->maptype != VM_MAPTYPE_NORMAL &&
	    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
		*bap = NULL;
		goto skip;
	}

	/*
	 * If the entry was copy-on-write, we either shadow it now (on a
	 * write fault) or demote the permitted access (on a read fault).
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Not allowed if TDF_NOFAULT is set as the shadowing
			 * operation can deadlock against the faulting
			 * function due to the copy-on-write.
			 */
			if (curthread->td_flags & TDF_NOFAULT) {
				rv = KERN_FAILURE_NOFAULT;
				goto done;
			}

			/*
			 * Make a new vm_map_backing + object, and place it
			 * in the object chain.  Note that no new references
			 * have appeared -- one just moved from the map to
			 * the new object.
			 */
			if (use_read_lock && vm_map_lock_upgrade(map)) {
				/* lost lock */
				use_read_lock = 0;
				goto RetryLookup;
			}
			use_read_lock = 0;
			vm_map_entry_shadow(entry);
			*wflags |= FW_DIDCOW;
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.  This code also handles
	 * partitioning large entries to improve vm_fault performance.
	 */
	if (entry->ba.object == NULL && !map->system_map) {
		if (use_read_lock && vm_map_lock_upgrade(map)) {
			/* lost lock */
			use_read_lock = 0;
			goto RetryLookup;
		}
		use_read_lock = 0;

		/*
		 * Partition large entries, giving each its own VM object,
		 * to improve concurrent fault performance.  This is only
		 * applicable to userspace.
		 */
		if (map != &kernel_map &&
		    entry->maptype == VM_MAPTYPE_NORMAL &&
		    ((entry->start ^ entry->end) & ~MAP_ENTRY_PARTITION_MASK) &&
		    vm_map_partition_enable) {
			if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
				entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
				++mycpu->gd_cnt.v_intrans_coll;
				++mycpu->gd_cnt.v_intrans_wait;
				vm_map_transition_wait(map, 0);
				goto RetryLookup;
			}
			vm_map_entry_partition(map, entry, vaddr, &count);
		}
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*bap = &entry->ba;

skip:
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->ba.offset);

	/*
	 * Return whether this is the only map sharing this data.  On
	 * success we return with a read lock held on the map.  On failure
	 * we return with the map unlocked.
	 */
	*out_prot = prot;
done:
	if (rv == KERN_SUCCESS) {
		if (use_read_lock == 0)
			vm_map_lock_downgrade(map);
	} else if (use_read_lock) {
		vm_map_unlock_read(map);
	} else {
		vm_map_unlock(map);
	}
	if (count > 0)
		vm_map_entry_release(count);

	return (rv);
}

/*
 * Releases locks acquired by a vm_map_lookup()
 * (according to the handle returned by that lookup).
 *
 * No other requirements.
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
{
	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
	if (count)
		vm_map_entry_release(count);
}

static void
vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
		       vm_offset_t vaddr, int *countp)
{
	vaddr &= ~MAP_ENTRY_PARTITION_MASK;
	vm_map_clip_start(map, entry, vaddr, countp);
	vaddr += MAP_ENTRY_PARTITION_SIZE;
	vm_map_clip_end(map, entry, vaddr, countp);
}

/*
 * Quick hack, needs some help to make it more SMP friendly.
 */
void
vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
		 vm_offset_t ran_beg, vm_offset_t ran_end)
{
	struct vm_map_ilock *scan;

	ilock->ran_beg = ran_beg;
	ilock->ran_end = ran_end;
	ilock->flags = 0;

	spin_lock(&map->ilock_spin);
restart:
	for (scan = map->ilock_base; scan; scan = scan->next) {
		if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
			scan->flags |= ILOCK_WAITING;
			ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
			goto restart;
		}
	}
	ilock->next = map->ilock_base;
	map->ilock_base = ilock;
	spin_unlock(&map->ilock_spin);
}

void
vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock)
{
	struct vm_map_ilock *scan;
	struct vm_map_ilock **scanp;

	spin_lock(&map->ilock_spin);
	scanp = &map->ilock_base;
	while ((scan = *scanp) != NULL) {
		if (scan == ilock) {
			*scanp = ilock->next;
			spin_unlock(&map->ilock_spin);
			if (ilock->flags & ILOCK_WAITING)
				wakeup(ilock);
			return;
		}
		scanp = &scan->next;
	}
	spin_unlock(&map->ilock_spin);
	panic("vm_map_deinterlock: missing ilock!");
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	static int nlines;
	/* XXX convert args. */
	vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);
	nlines++;

	if (!full && db_indent)
		return;

	db_indent += 2;
	RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
		    (void *)entry, (void *)entry->start, (void *)entry->end);
		nlines++;
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)
					     entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		switch(entry->maptype) {
		case VM_MAPTYPE_SUBMAP:
			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
			db_printf(", share=%p, offset=0x%lx\n",
			    (void *)entry->ba.sub_map,
			    (long)entry->ba.offset);
			nlines++;

			db_indent += 2;
			vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map,
				     full, 0, NULL);
			db_indent -= 2;
			break;
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
			db_printf(", object=%p, offset=0x%lx",
			    (void *)entry->ba.object,
			    (long)entry->ba.offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;

			if (entry->ba.object) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
						entry->ba.object,
						full, 0, NULL);
				nlines += 4;
				db_indent -= 2;
			}
			break;
		case VM_MAPTYPE_UKSMAP:
			db_printf(", uksmap=%p, offset=0x%lx",
			    (void *)entry->ba.uksmap,
			    (long)entry->ba.offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;
			break;
		default:
			break;
		}
	}
	db_indent -= 2;
	if (db_indent == 0)
		nlines = 0;
}

/*
 * Debugging only
 */
DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */
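
/*
 * Illustration (not part of the build): a condensed sketch of the typical
 * vm_map_lookup() / vm_map_lookup_done() pairing described above.  The
 * variable names and fault address are hypothetical and error handling is
 * elided; the pattern mirrors how a fault path would consume the results.
 */
#if 0
	vm_map_t map = &curproc->p_vmspace->vm_map;	/* IN/OUT */
	vm_offset_t vaddr;				/* faulting address */
	vm_map_entry_t entry;
	struct vm_map_backing *ba;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int wflags;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ,
			   &entry, &ba, &pindex, &prot, &wflags);
	if (rv == KERN_SUCCESS) {
		/* ... use ba->object, pindex, prot, wflags ... */
		vm_map_lookup_done(map, entry, 0);
	}
#endif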