/*	$NetBSD: uvm_map.c,v 1.118 2002/03/08 20:48:47 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.118 2002/03/08 20:48:47 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define UVM_MAP
#include <uvm/uvm.h>

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif

extern struct vm_map *pager_map;

struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";

/*
 * pool for vmspace structures.
 */

struct pool uvm_vmspace_pool;

/*
 * pool for dynamically-allocated map entries.
 */

struct pool uvm_map_entry_pool;
struct pool uvm_map_entry_kmem_pool;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
} while (0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
} while (0)
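/*
 * Illustrative note (not in the original file): entries form a circular
 * doubly-linked list whose sentinel is &map->header, so a locked
 * traversal of every entry in a map is simply:
 *
 *	struct vm_map_entry *e;
 *
 *	for (e = map->header.next; e != &map->header; e = e->next)
 *		printf("entry 0x%lx-0x%lx\n", e->start, e->end);
 */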
/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (0)

/*
 * local prototypes
 */

static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *));
static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
static void uvm_mapent_free __P((struct vm_map_entry *));
static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
static void uvm_map_reference_amap __P((struct vm_map_entry *, int));
static void uvm_map_unreference_amap __P((struct vm_map_entry *, int));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static __inline struct vm_map_entry *
uvm_mapent_alloc(map)
	struct vm_map *map;
{
	struct vm_map_entry *me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);

	if (map->flags & VM_MAP_INTRSAFE || cold) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me) uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (me == NULL) {
			panic("uvm_mapent_alloc: out of static map entries, "
			    "check MAX_KMAPENT (currently %d)",
			    MAX_KMAPENT);
		}
		me->flags = UVM_MAP_STATIC;
	} else if (map == kernel_map) {
		me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
		me->flags = UVM_MAP_KMEM;
	} else {
		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
		me->flags = 0;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 */

static __inline void
uvm_mapent_free(me)
	struct vm_map_entry *me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if (me->flags & UVM_MAP_STATIC) {
		s = splvm();
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	} else if (me->flags & UVM_MAP_KMEM) {
		pool_put(&uvm_map_entry_kmem_pool, me);
	} else {
		pool_put(&uvm_map_entry_pool, me);
	}
}
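/*
 * Added note (illustrative, not in the original file): map entries come
 * from three sources, and the me->flags tag records which one so that
 * uvm_mapent_free() can return each entry to the matching place:
 *
 *	UVM_MAP_STATIC	- boot-time array (interrupt-safe maps, or "cold")
 *	UVM_MAP_KMEM	- uvm_map_entry_kmem_pool (kernel_map entries)
 *	0		- uvm_map_entry_pool (all other maps)
 */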
/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(src, dst)
	struct vm_map_entry *src;
	struct vm_map_entry *dst;
{
	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
	    ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	struct vm_map *map;
	struct vm_map_entry *entry;
{
	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static __inline void
uvm_map_reference_amap(entry, flags)
	struct vm_map_entry *entry;
	int flags;
{
	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static __inline void
uvm_map_unreference_amap(entry, flags)
	struct vm_map_entry *entry;
	int flags;
{
	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of struct vm_map_entry *'s for the kernel here.
 */

void
uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
	    0);
	UVMCNT_INIT(uvm_mlk_call, UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}

	/*
	 * initialize the map-related pools.
	 */
	pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", &pool_allocator_nointr);
	pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpekpl", NULL);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(map, entry, start)
	struct vm_map *map;
	struct vm_map_entry *entry;
	vaddr_t start;
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}
/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	struct vm_map *map;
	struct vm_map_entry *entry;
	vaddr_t end;
{
	struct vm_map_entry *new_entry;
	vaddr_t new_adj; /* #bytes we move start forward */

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}
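/*
 * Illustrative sketch (not in the original file): a caller holding the
 * map lock clips with the wrapper macros, which only call the functions
 * above when a split is actually needed:
 *
 *	UVM_MAP_CLIP_START(map, entry, start);
 *	UVM_MAP_CLIP_END(map, entry, end);
 *
 * afterwards "entry" begins exactly at "start" and ends at or before
 * "end", so it can be modified without disturbing its neighbors.
 */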
/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we try to align the virtual address to
 *	the specified alignment.  this is only a hint; if we can't
 *	do it, the address will be unaligned.  this is provided as
 *	a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(map, startp, size, uobj, uoffset, align, flags)
	struct vm_map *map;
	vaddr_t *startp;	/* IN/OUT */
	vsize_t size;
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	uvm_flag_t flags;
{
	struct vm_map_entry *prev_entry, *new_entry;
	vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
	    UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(curproc != NULL || map->flags & VM_MAP_INTRSAFE);

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map);
	}

	/*
	 * figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK) {
			if (new_entry) {
				uvm_mapent_free(new_entry);
			}
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, align, flags)) == NULL) {
		UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
		vm_map_unlock(map);
		if (new_entry) {
			uvm_mapent_free(new_entry);
		}
		return ENOMEM;
	}

#ifdef PMAP_GROWKERNEL
	{
		/*
		 * If the kernel pmap can't map the requested space,
		 * then allocate more resources for it.
		 */
		if (map == kernel_map && uvm_maxkaddr < (*startp + size))
			uvm_maxkaddr = pmap_growkernel(*startp + size);
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).   the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = *startp - vm_map_min(kernel_map);
		}
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */
	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->end == *startp && prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto nomerge;

		if (UVM_ET_ISSUBMAP(prev_entry))
			goto nomerge;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto nomerge;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto nomerge;

		/* wiring status must match (new area is unwired) */
		if (VM_MAPENT_ISWIRED(prev_entry))
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto nomerge;
		}

		/* got it! */

		UVMCNT_INCR(map_backmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (prev_entry->aref.ar_amap) {
			amap_extend(prev_entry, size);
		}

		prev_entry->end += size;
		map->size += size;

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		vm_map_unlock(map);
		if (new_entry) {
			uvm_mapent_free(new_entry);
		}
		return 0;
	}

nomerge:
	UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);

	/*
	 * check for possible forward merge (which we don't do) and count
	 * the number of times we missed a *possible* chance to merge more
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->next != &map->header &&
	    prev_entry->next->start == (*startp + size))
		UVMCNT_INCR(map_forwmerge);

	/*
	 * allocate new entry and link it in.
	 */

	if (new_entry == NULL) {
		new_entry = uvm_mapent_alloc(map);
	}
	new_entry->start = *startp;
	new_entry->end = new_entry->start + size;
	new_entry->object.uvm_obj = uobj;
	new_entry->offset = uoffset;

	if (uobj)
		new_entry->etype = UVM_ET_OBJ;
	else
		new_entry->etype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		new_entry->etype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			new_entry->etype |= UVM_ET_NEEDSCOPY;
	}

	new_entry->protection = prot;
	new_entry->max_protection = maxprot;
	new_entry->inheritance = inherit;
	new_entry->wired_count = 0;
	new_entry->advice = advice;
	if (flags & UVM_FLAG_OVERLAY) {

		/*
		 * to_add: for BSS we overallocate a little since we
		 * are likely to extend
		 */

		vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
		    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
		struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = amap;
	} else {
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = NULL;
	}
	uvm_map_entry_link(map, prev_entry, new_entry);
	map->size += size;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return 0;
}
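/*
 * Illustrative sketch (not in the original file; kernel context and the
 * "kva"/"error" variables are assumed): a typical anonymous, zero-fill,
 * copy-on-write mapping via the flag-packing macro looks like
 *
 *	vaddr_t kva = vm_map_min(kernel_map);
 *	int error;
 *
 *	error = uvm_map(kernel_map, &kva, PAGE_SIZE, NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_OVERLAY));
 *
 * <NULL,UVM_UNKNOWN_OFFSET> is case [2] above (no uobj, no PMAP_PREFER
 * hint), and UVM_FLAG_OVERLAY allocates the amap up front.
 */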
/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t
uvm_map_lookup_entry(map, address, entry)
	struct vm_map *map;
	vaddr_t address;
	struct vm_map_entry **entry;	/* OUT */
{
	struct vm_map_entry *cur;
	struct vm_map_entry *last;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {

		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */

		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return (TRUE);
		}
	} else {

		/*
		 * go from start to hint, *inclusively*
		 */

		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, map->hint, cur);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	return (FALSE);
}
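/*
 * Illustrative sketch (not in the original file): the contract of
 * uvm_map_lookup_entry() for a caller holding at least a read lock:
 *
 *	struct vm_map_entry *e;
 *
 *	if (uvm_map_lookup_entry(map, va, &e))
 *		...	va is mapped: e->start <= va < e->end
 *	else
 *		...	va is in a hole: e is the entry just before it,
 *			or &map->header if the hole is at the front
 */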
/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
 *	set (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if `align' is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and vm_map_find
 */

struct vm_map_entry *
uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
	struct vm_map *map;
	vaddr_t hint;
	vsize_t length;
	vaddr_t *result; /* OUT */
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	int flags;
{
	struct vm_map_entry *entry, *next, *tmp;
	vaddr_t end, orig_hint;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < map->min_offset) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist,"<- fixed & VA in use",
				    0, 0, 0, 0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end   = base VA of current gap,
	 *	 next->start  = VA of end of current gap
	 */

	for (;; hint = (entry = next)->end) {

		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */

		if ((flags & UVM_FLAG_FIXED) == 0 &&
		    uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		if (align != 0) {
			if ((hint & (align - 1)) != 0)
				hint = roundup(hint, align);
			/*
			 * XXX Should we PMAP_PREFER() here again?
			 */
		}
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			if (align != 0) {
				UVMHIST_LOG(maphist,
				    "calling recursively, no align",
				    0,0,0,0);
				return (uvm_map_findspace(map, orig_hint,
				    length, result, uobj, uoffset, 0, flags));
			}
			return (NULL);
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
			return(NULL); /* only one shot at it ... */
		}
	}
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	return (entry);
}
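/*
 * Worked example (added for illustration): with align = 0x10000 and a
 * gap beginning at entry->end = 0x12345000, the candidate address is
 * rounded up:
 *
 *	hint = roundup(0x12345000, 0x10000) = 0x12350000
 *
 * if [hint, hint + length) then collides with next->start, the loop
 * advances to the next gap; only when alignment makes every gap fail
 * does the function retry itself once with align == 0.
 */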
/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */

void
uvm_unmap_remove(map, start, end, entry_list)
	struct vm_map *map;
	vaddr_t start, end;
	struct vm_map_entry **entry_list;	/* OUT */
{
	struct vm_map_entry *entry, *first_entry, *next;
	vaddr_t len;
	UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);
	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * find first entry
	 */

	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
		/* clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);
	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */

	first_entry = NULL;
	*entry_list = NULL;

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could end
	 * up with a mapping to a page on the free list which would be very
	 * bad)
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (VM_MAPENT_ISWIRED(entry)) {
			uvm_map_entry_unwire(map, entry);
		}
		if ((map->flags & VM_MAP_PAGEABLE) == 0) {

			/*
			 * if the map is non-pageable, any pages mapped there
			 * must be wired and entered with pmap_kenter_pa(),
			 * and we should free any such pages immediately.
			 * this is mostly used for kmem_map and mb_map.
			 */
			uvm_km_pgremove_intrsafe(entry->start, entry->end);
			pmap_kremove(entry->start, len);
		} else if (UVM_ET_ISOBJ(entry) &&
		    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - drops the swap slot
			 *     - uvm_pagefree the page
			 */

			/*
			 * remove mappings from pmap and drop the pages
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */

			pmap_remove(pmap_kernel(), entry->start,
			    entry->start + len);
			uvm_km_pgremove(entry->object.uvm_obj,
			    entry->start - vm_map_min(kernel_map),
			    entry->end - vm_map_min(kernel_map));

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */

			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;
		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {

			/*
			 * remove mappings the standard way.
			 */

			pmap_remove(map->pmap, entry->start, entry->end);
		}

		/*
		 * remove entry from map and put it on our list of entries
		 * that we've nuked.  then go to next entry.
		 */

		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);

		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;
	}
	pmap_update(vm_map_pmap(map));

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
}
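/*
 * Illustrative sketch (not in the original file): the intended calling
 * pattern, which is what uvm_unmap() itself boils down to -- unlink the
 * entries with the map locked, then drop references with it unlocked:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */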
/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(first_entry, flags)
	struct vm_map_entry *first_entry;
	int flags;
{
	struct vm_map_entry *next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {
		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj,
		    UVM_ET_ISSUBMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			uvm_map_unreference_amap(first_entry, flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		if (UVM_ET_ISSUBMAP(first_entry)) {
			/* ... unlikely to happen, but play it safe */
			uvm_map_deallocate(first_entry->object.sub_map);
		} else {
			if (UVM_ET_ISOBJ(first_entry) &&
			    first_entry->object.uvm_obj->pgops->pgo_detach)
				first_entry->object.uvm_obj->pgops->
				    pgo_detach(first_entry->object.uvm_obj);
		}
		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

int
uvm_map_reserve(map, size, offset, align, raddr)
	struct vm_map *map;
	vsize_t size;
	vaddr_t offset;	/* hint for pmap_prefer */
	vsize_t align;	/* alignment hint */
	vaddr_t *raddr;	/* IN:hint, OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map,size,offset,raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);	/* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return (FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return (TRUE);
}
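/*
 * Illustrative sketch (not in the original file; "kva" is a hypothetical
 * variable): reserving a blank range that a later uvm_map_replace() will
 * fill with real entries, exactly as uvm_map_extract() does in step 1:
 *
 *	vaddr_t kva = vm_map_min(kernel_map);
 *
 *	if (uvm_map_reserve(kernel_map, len, start, 0, &kva) == FALSE)
 *		return (ENOMEM);
 */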
/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */

int
uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vaddr_t start, end;
	struct vm_map_entry *newents;
	int nnewents;
{
	struct vm_map_entry *oldent, *last;

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return (FALSE);
	}

#ifdef DIAGNOSTIC

	/*
	 * sanity check the newents chain
	 */

	{
		struct vm_map_entry *tmpent = newents;
		int nent = 0;
		vaddr_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, tmpent->end=0x%lx, "
				    "end=0x%lx\n", tmpent->start, tmpent->end,
				    end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {
		last = newents->prev;

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;
		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}


	/*
	 * now we can free the old blank entry, unlock the map and return.
	 */
	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 *	(maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *             be used from within the kernel in a kernel level map <<<
 */

int
uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
	struct vm_map *srcmap, *dstmap;
	vaddr_t start, *dstaddrp;
	vsize_t len;
	int flags;
{
	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
	    oldstart;
	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
	    *deadentry, *oldentry;
	vsize_t elen;
	int nchain, error, copy_ok;
	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
	    len,0);
	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);

	/*
	 * step 0: sanity check: start must be on a page boundary, length
	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
	 * REMOVE.
	 */

	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
	    (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);

	/*
	 * step 1: reserve space in the target map for the extracted area
	 */

	dstaddr = vm_map_min(dstmap);
	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
		return(ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);

	/*
	 * step 2: setup for the extraction process loop by init'ing the
	 * map entry chain, locking src map, and looking up the first useful
	 * entry in the map.
	 */

	end = start + len;
	newend = dstaddr + len;
	chain = endchain = NULL;
	nchain = 0;
	vm_map_lock(srcmap);

	if (uvm_map_lookup_entry(srcmap, start, &entry)) {

		/* "start" is within an entry */
		if (flags & UVM_EXTRACT_QREF) {

			/*
			 * for quick references we don't clip the entry, so
			 * the entry may map space "before" the starting
			 * virtual address... this is the "fudge" factor
			 * (which can be non-zero only the first time
			 * through the "while" loop in step 3).
			 */

			fudge = start - entry->start;
		} else {

			/*
			 * normal reference: we clip the map to fit (thus
			 * fudge is zero)
			 */

			UVM_MAP_CLIP_START(srcmap, entry, start);
			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
			fudge = 0;
		}
	} else {

		/* "start" is not within an entry ... skip to next entry */
		if (flags & UVM_EXTRACT_CONTIG) {
			error = EINVAL;
			goto bad;	/* definite hole here ... */
		}
		entry = entry->next;
		fudge = 0;
	}

	/* save values from srcmap for step 6 */
	orig_entry = entry;
	orig_fudge = fudge;

	/*
	 * step 3: now start looping through the map entries, extracting
	 * as we go.
	 */

	while (entry->start < end && entry != &srcmap->header) {

		/* if we are not doing a quick reference, clip it */
		if ((flags & UVM_EXTRACT_QREF) == 0)
			UVM_MAP_CLIP_END(srcmap, entry, end);

		/* clear needs_copy (allow chunking) */
		if (UVM_ET_ISNEEDSCOPY(entry)) {
			if (fudge)
				oldstart = entry->start;
			else
				oldstart = 0;	/* XXX: gcc */
			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
				error = ENOMEM;
				goto bad;
			}

			/* amap_copy could clip (during chunk)!  update fudge */
			if (fudge) {
				fudge = fudge - (entry->start - oldstart);
				orig_fudge = fudge;
			}
		}

		/* calculate the offset of this from "start" */
		oldoffset = (entry->start + fudge) - start;

		/* allocate a new map entry */
		newentry = uvm_mapent_alloc(dstmap);
		if (newentry == NULL) {
			error = ENOMEM;
			goto bad;
		}

		/* set up new map entry */
		newentry->next = NULL;
		newentry->prev = endchain;
		newentry->start = dstaddr + oldoffset;
		newentry->end =
		    newentry->start + (entry->end - (entry->start + fudge));
		if (newentry->end > newend || newentry->end < newentry->start)
			newentry->end = newend;
		newentry->object.uvm_obj = entry->object.uvm_obj;
		if (newentry->object.uvm_obj) {
			if (newentry->object.uvm_obj->pgops->pgo_reference)
				newentry->object.uvm_obj->pgops->
				    pgo_reference(newentry->object.uvm_obj);
			newentry->offset = entry->offset + fudge;
		} else {
			newentry->offset = 0;
		}
		newentry->etype = entry->etype;
		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
		    entry->max_protection : entry->protection;
		newentry->max_protection = entry->max_protection;
		newentry->inheritance = entry->inheritance;
		newentry->wired_count = 0;
		newentry->aref.ar_amap = entry->aref.ar_amap;
		if (newentry->aref.ar_amap) {
			newentry->aref.ar_pageoff =
			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
			uvm_map_reference_amap(newentry, AMAP_SHARED |
			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
		} else {
			newentry->aref.ar_pageoff = 0;
		}
		newentry->advice = entry->advice;

		/* now link it on the chain */
		nchain++;
		if (endchain == NULL) {
			chain = endchain = newentry;
		} else {
			endchain->next = newentry;
			endchain = newentry;
		}

		/* end of 'while' loop! */
		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
		    (entry->next == &srcmap->header ||
		    entry->next->start != entry->end)) {
			error = EINVAL;
			goto bad;
		}
		entry = entry->next;
		fudge = 0;
	}

	/*
	 * step 4: close off chain (in format expected by uvm_map_replace)
	 */

	if (chain)
		chain->prev = endchain;

	/*
	 * step 5: attempt to lock the dest map so we can pmap_copy.
	 *	note usage of copy_ok:
	 *	1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
	 *	0 => dstmap unlocked, NO pmap_copy, and we will "replace" in
	 *	     step 7
	 */
	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
		copy_ok = 1;
		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain)) {
			if (srcmap != dstmap)
				vm_map_unlock(dstmap);
			error = EIO;
			goto bad;
		}
	} else {
		copy_ok = 0;
		/* replace deferred until step 7 */
	}

	/*
	 * step 6: traverse the srcmap a second time to do the following:
	 *  - if we got a lock on the dstmap do pmap_copy
	 *  - if UVM_EXTRACT_REMOVE remove the entries
	 * we make use of orig_entry and orig_fudge (saved in step 2)
	 */

	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {

		/* purge possible stale hints from srcmap */
		if (flags & UVM_EXTRACT_REMOVE) {
			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
			if (srcmap->first_free->start >= start)
				srcmap->first_free = orig_entry->prev;
		}

		entry = orig_entry;
		fudge = orig_fudge;
		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */

		while (entry->start < end && entry != &srcmap->header) {
			if (copy_ok) {
				oldoffset = (entry->start + fudge) - start;
				elen = MIN(end, entry->end) -
				    (entry->start + fudge);
				pmap_copy(dstmap->pmap, srcmap->pmap,
				    dstaddr + oldoffset, elen,
				    entry->start + fudge);
			}

			/* we advance "entry" in the following if statement */
			if (flags & UVM_EXTRACT_REMOVE) {
				pmap_remove(srcmap->pmap, entry->start,
				    entry->end);
				oldentry = entry;	/* save entry */
				entry = entry->next;	/* advance */
				uvm_map_entry_unlink(srcmap, oldentry);
				/* add to dead list */
				oldentry->next = deadentry;
				deadentry = oldentry;
			} else {
				entry = entry->next;	/* advance */
			}

			/* end of 'while' loop */
			fudge = 0;
		}
		pmap_update(srcmap->pmap);

		/*
		 * unlock dstmap.  we will dispose of deadentry in
		 * step 7 if needed
		 */

		if (copy_ok && srcmap != dstmap)
			vm_map_unlock(dstmap);

	} else {
		deadentry = NULL;
	}

	/*
	 * step 7: we are done with the source map, unlock.   if copy_ok
	 * is 0 then we have not replaced the dummy mapping in dstmap yet
	 * and we need to do so now.
	 */

	vm_map_unlock(srcmap);
	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
		uvm_unmap_detach(deadentry, 0);	/* dispose of old entries */

	/* now do the replacement if we didn't do it in step 5 */
	if (copy_ok == 0) {
		vm_map_lock(dstmap);
		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain);
		vm_map_unlock(dstmap);

		if (error == FALSE) {
			error = EIO;
			goto bad2;
		}
	}
	return(0);

	/*
	 * bad: failure recovery
	 */
bad:
	vm_map_unlock(srcmap);
bad2:			/* src already unlocked */
	if (chain)
		uvm_unmap_detach(chain,
		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
	uvm_unmap(dstmap, dstaddr, dstaddr+len);	/* ??? */
	return(error);
}
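/*
 * Illustrative sketch (not in the original file): a temporary window
 * into another process's address space using quick references, in the
 * style of a procfs/ptrace-type caller; "kva" is hypothetical:
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(&p->p_vmspace->vm_map, uva, len,
 *	    kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *	... access the data at kva ...
 *	uvm_unmap(kernel_map, kva, kva + len);
 *
 * per the QREF note above, the extracted range must be unmapped again
 * from within the kernel, in a kernel-level map.
 */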
/* end of extraction functions */

/*
 * uvm_map_submap: punch down part of a map into a submap
 *
 * => only the kernel_map is allowed to be submapped
 * => the purpose of submapping is to break up the locking granularity
 *	of a larger map
 * => the range specified must have been mapped previously with a uvm_map()
 *	call [with uobj==NULL] to create a blank map entry in the main map.
 *	[And it had better still be blank!]
 * => maps which contain submaps should never be copied or forked.
 * => to remove a submap, use uvm_unmap() on the main map
 *	and then uvm_map_deallocate() the submap.
 * => main map must be unlocked.
 * => submap must have been init'd and have a zero reference count.
 *	[need not be locked as we don't actually reference it]
 */

int
uvm_map_submap(map, start, end, submap)
	struct vm_map *map, *submap;
	vaddr_t start, end;
{
	struct vm_map_entry *entry;
	int error;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
	} else {
		entry = NULL;
	}

	if (entry != NULL &&
	    entry->start == start && entry->end == end &&
	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
		entry->etype |= UVM_ET_SUBMAP;
		entry->object.sub_map = submap;
		entry->offset = 0;
		uvm_map_reference(submap);
		error = 0;
	} else {
		error = EINVAL;
	}
	vm_map_unlock(map);
	return error;
}
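/*
 * Illustrative sketch (not in the original file): the sequence the rules
 * above require -- first create a blank entry with uvm_map() (uobj ==
 * NULL, no overlay), then punch it down:
 *
 *	(void) uvm_map(kernel_map, &start, size, NULL, UVM_UNKNOWN_OFFSET,
 *	    0, UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
 *	error = uvm_map_submap(kernel_map, start, start + size, submap);
 *
 * this is essentially what uvm_km_suballoc() does on the caller's behalf.
 */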
/*
 * uvm_map_protect: change map protection
 *
 * => set_max means set max_protection.
 * => map must be unlocked.
 */

#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)

int
uvm_map_protect(map, start, end, new_prot, set_max)
	struct vm_map *map;
	vaddr_t start, end;
	vm_prot_t new_prot;
	boolean_t set_max;
{
	struct vm_map_entry *current, *entry;
	int error = 0;
	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
	    map, start, end, new_prot);

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (UVM_ET_ISSUBMAP(current)) {
			error = EINVAL;
			goto out;
		}
		if ((new_prot & current->max_protection) != new_prot) {
			error = EACCES;
			goto out;
		}
		/*
		 * Don't allow VM_PROT_EXECUTE to be set on entries that
		 * point to vnodes that are associated with a NOEXEC file
		 * system.
		 */
		if (UVM_ET_ISOBJ(current) &&
		    UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
			struct vnode *vp =
			    (struct vnode *) current->object.uvm_obj;

			if ((new_prot & VM_PROT_EXECUTE) != 0 &&
			    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
				error = EACCES;
				goto out;
			}
		}
		current = current->next;
	}

	/* go back and fix up protections (no need to clip this time). */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		UVM_MAP_CLIP_END(map, current, end);
		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) & old_prot;
		else
			current->protection = new_prot;

		/*
		 * update physical map if necessary.  worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
			/* update pmap!  mask with the current entry's COW
			   status, not the first entry's. */
			pmap_protect(map->pmap, current->start, current->end,
			    current->protection & MASK(current));

			/*
			 * If this entry points at a vnode, and the
			 * protection includes VM_PROT_EXECUTE, mark
			 * the vnode as VEXECMAP.
			 */
			if (UVM_ET_ISOBJ(current)) {
				struct uvm_object *uobj =
				    current->object.uvm_obj;

				if (UVM_OBJ_IS_VNODE(uobj) &&
				    (current->protection & VM_PROT_EXECUTE))
					vn_markexec((struct vnode *) uobj);
			}
		}

		/*
		 * If the map is configured to lock any future mappings,
		 * wire this entry now if the old protection was VM_PROT_NONE
		 * and the new protection is not VM_PROT_NONE.
		 */

		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
		    VM_MAPENT_ISWIRED(entry) == 0 &&
		    old_prot == VM_PROT_NONE &&
		    new_prot != VM_PROT_NONE) {
			if (uvm_map_pageable(map, entry->start,
			    entry->end, FALSE,
			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {

				/*
				 * If locking the entry fails, remember the
				 * error if it's the first one.  Note we
				 * still continue setting the protection in
				 * the map, but will return the error
				 * condition regardless.
				 *
				 * XXX Ignore what the actual error is,
				 * XXX just call it a resource shortage
				 * XXX so that it doesn't get confused
				 * XXX what uvm_map_protect() itself would
				 * XXX normally return.
				 */

				error = ENOMEM;
			}
		}
		current = current->next;
	}
	pmap_update(map->pmap);

 out:
	vm_map_unlock(map);
	UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
	return error;
}

#undef MASK
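/*
 * Illustrative sketch (not in the original file): an mprotect(2)-style
 * caller dropping write permission on one page:
 *
 *	error = uvm_map_protect(map, va, va + PAGE_SIZE,
 *	    VM_PROT_READ, FALSE);
 *
 * with set_max == FALSE only "protection" changes; passing TRUE would
 * also lower max_protection, clamping the current protection to the new
 * maximum.
 */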
/*
 * uvm_map_inherit: set inheritance code for range of addrs in map.
 *
 * => map must be unlocked
 * => note that the inherit code is used during a "fork".  see fork
 *	code for details.
 */

int
uvm_map_inherit(map, start, end, new_inheritance)
	struct vm_map *map;
	vaddr_t start;
	vaddr_t end;
	vm_inherit_t new_inheritance;
{
	struct vm_map_entry *entry, *temp_entry;
	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
	    map, start, end, new_inheritance);

	switch (new_inheritance) {
	case MAP_INHERIT_NONE:
	case MAP_INHERIT_COPY:
	case MAP_INHERIT_SHARE:
		break;
	default:
		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
		return EINVAL;
	}

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}
	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);
		entry->inheritance = new_inheritance;
		entry = entry->next;
	}
	vm_map_unlock(map);
	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
	return 0;
}

/*
 * uvm_map_advice: set advice code for range of addrs in map.
 *
 * => map must be unlocked
 */

int
uvm_map_advice(map, start, end, new_advice)
	struct vm_map *map;
	vaddr_t start;
	vaddr_t end;
	int new_advice;
{
	struct vm_map_entry *entry, *temp_entry;
	UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
	    map, start, end, new_advice);

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}

	/*
	 * XXXJRT: disallow holes?
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);

		switch (new_advice) {
		case MADV_NORMAL:
		case MADV_RANDOM:
		case MADV_SEQUENTIAL:
			/* nothing special here */
			break;

		default:
			vm_map_unlock(map);
			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
			return EINVAL;
		}
		entry->advice = new_advice;
		entry = entry->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
	return 0;
}
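/*
 * Illustrative sketch (not in the original file): both functions follow
 * the same clip-and-walk pattern, so the minherit(2) and madvise(2)
 * paths reduce to calls like
 *
 *	error = uvm_map_inherit(map, start, end, MAP_INHERIT_SHARE);
 *	error = uvm_map_advice(map, start, end, MADV_SEQUENTIAL);
 */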
2023 */ 2024 2025 int 2026 uvm_map_pageable(map, start, end, new_pageable, lockflags) 2027 struct vm_map *map; 2028 vaddr_t start, end; 2029 boolean_t new_pageable; 2030 int lockflags; 2031 { 2032 struct vm_map_entry *entry, *start_entry, *failed_entry; 2033 int rv; 2034 #ifdef DIAGNOSTIC 2035 u_int timestamp_save; 2036 #endif 2037 UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist); 2038 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)", 2039 map, start, end, new_pageable); 2040 KASSERT(map->flags & VM_MAP_PAGEABLE); 2041 2042 if ((lockflags & UVM_LK_ENTER) == 0) 2043 vm_map_lock(map); 2044 VM_MAP_RANGE_CHECK(map, start, end); 2045 2046 /* 2047 * only one pageability change may take place at one time, since 2048 * uvm_fault_wire assumes it will be called only once for each 2049 * wiring/unwiring. therefore, we have to make sure we're actually 2050 * changing the pageability for the entire region. we do so before 2051 * making any changes. 2052 */ 2053 2054 if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) { 2055 if ((lockflags & UVM_LK_EXIT) == 0) 2056 vm_map_unlock(map); 2057 2058 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0); 2059 return EFAULT; 2060 } 2061 entry = start_entry; 2062 2063 /* 2064 * handle wiring and unwiring separately. 2065 */ 2066 2067 if (new_pageable) { /* unwire */ 2068 UVM_MAP_CLIP_START(map, entry, start); 2069 2070 /* 2071 * unwiring. first ensure that the range to be unwired is 2072 * really wired down and that there are no holes. 2073 */ 2074 2075 while ((entry != &map->header) && (entry->start < end)) { 2076 if (entry->wired_count == 0 || 2077 (entry->end < end && 2078 (entry->next == &map->header || 2079 entry->next->start > entry->end))) { 2080 if ((lockflags & UVM_LK_EXIT) == 0) 2081 vm_map_unlock(map); 2082 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0); 2083 return EINVAL; 2084 } 2085 entry = entry->next; 2086 } 2087 2088 /* 2089 * POSIX 1003.1b - a single munlock call unlocks a region, 2090 * regardless of the number of mlock calls made on that 2091 * region. 2092 */ 2093 2094 entry = start_entry; 2095 while ((entry != &map->header) && (entry->start < end)) { 2096 UVM_MAP_CLIP_END(map, entry, end); 2097 if (VM_MAPENT_ISWIRED(entry)) 2098 uvm_map_entry_unwire(map, entry); 2099 entry = entry->next; 2100 } 2101 if ((lockflags & UVM_LK_EXIT) == 0) 2102 vm_map_unlock(map); 2103 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 2104 return 0; 2105 } 2106 2107 /* 2108 * wire case: in two passes [XXXCDC: ugly block of code here] 2109 * 2110 * 1: holding the write lock, we create any anonymous maps that need 2111 * to be created. then we clip each map entry to the region to 2112 * be wired and increment its wiring count. 2113 * 2114 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault 2115 * in the pages for any newly wired area (wired_count == 1). 2116 * 2117 * downgrading to a read lock for uvm_fault_wire avoids a possible 2118 * deadlock with another thread that may have faulted on one of 2119 * the pages to be wired (it would mark the page busy, blocking 2120 * us, then in turn block on the map lock that we hold). because 2121 * of problems in the recursive lock package, we cannot upgrade 2122 * to a write lock in vm_map_lookup. thus, any actions that 2123 * require the write lock must be done beforehand. because we 2124 * keep the read lock on the map, the copy-on-write status of the 2125 * entries we modify here cannot change. 
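 *
 * schematically, the code below does (a sketch of the calls already
 * used in this function, not new interfaces):
 *
 *	vm_map_busy(map);		mark the map busy for others
 *	vm_map_downgrade(map);		write lock -> read lock
 *	for each entry with wired_count == 1:
 *		uvm_fault_wire(...)	may sleep faulting pages in
 *	on failure:
 *		vm_map_upgrade(map); vm_map_unbusy(map);
 *		then roll the wired counts back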
2126 */ 2127 2128 while ((entry != &map->header) && (entry->start < end)) { 2129 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 2130 2131 /* 2132 * perform actions of vm_map_lookup that need the 2133 * write lock on the map: create an anonymous map 2134 * for a copy-on-write region, or an anonymous map 2135 * for a zero-fill region. (XXXCDC: submap case 2136 * ok?) 2137 */ 2138 2139 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 2140 if (UVM_ET_ISNEEDSCOPY(entry) && 2141 ((entry->max_protection & VM_PROT_WRITE) || 2142 (entry->object.uvm_obj == NULL))) { 2143 amap_copy(map, entry, M_WAITOK, TRUE, 2144 start, end); 2145 /* XXXCDC: wait OK? */ 2146 } 2147 } 2148 } 2149 UVM_MAP_CLIP_START(map, entry, start); 2150 UVM_MAP_CLIP_END(map, entry, end); 2151 entry->wired_count++; 2152 2153 /* 2154 * Check for holes 2155 */ 2156 2157 if (entry->protection == VM_PROT_NONE || 2158 (entry->end < end && 2159 (entry->next == &map->header || 2160 entry->next->start > entry->end))) { 2161 2162 /* 2163 * found one. amap creation actions do not need to 2164 * be undone, but the wired counts need to be restored. 2165 */ 2166 2167 while (entry != &map->header && entry->end > start) { 2168 entry->wired_count--; 2169 entry = entry->prev; 2170 } 2171 if ((lockflags & UVM_LK_EXIT) == 0) 2172 vm_map_unlock(map); 2173 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0); 2174 return EINVAL; 2175 } 2176 entry = entry->next; 2177 } 2178 2179 /* 2180 * Pass 2. 2181 */ 2182 2183 #ifdef DIAGNOSTIC 2184 timestamp_save = map->timestamp; 2185 #endif 2186 vm_map_busy(map); 2187 vm_map_downgrade(map); 2188 2189 rv = 0; 2190 entry = start_entry; 2191 while (entry != &map->header && entry->start < end) { 2192 if (entry->wired_count == 1) { 2193 rv = uvm_fault_wire(map, entry->start, entry->end, 2194 VM_FAULT_WIREMAX, entry->max_protection); 2195 if (rv) { 2196 2197 /* 2198 * wiring failed. break out of the loop. 2199 * we'll clean up the map below, once we 2200 * have a write lock again. 2201 */ 2202 2203 break; 2204 } 2205 } 2206 entry = entry->next; 2207 } 2208 2209 if (rv) { /* failed? */ 2210 2211 /* 2212 * Get back to an exclusive (write) lock. 2213 */ 2214 2215 vm_map_upgrade(map); 2216 vm_map_unbusy(map); 2217 2218 #ifdef DIAGNOSTIC 2219 if (timestamp_save != map->timestamp) 2220 panic("uvm_map_pageable: stale map"); 2221 #endif 2222 2223 /* 2224 * first drop the wiring count on all the entries 2225 * which haven't actually been wired yet. 2226 */ 2227 2228 failed_entry = entry; 2229 while (entry != &map->header && entry->start < end) { 2230 entry->wired_count--; 2231 entry = entry->next; 2232 } 2233 2234 /* 2235 * now, unwire all the entries that were successfully 2236 * wired above. 2237 */ 2238 2239 entry = start_entry; 2240 while (entry != failed_entry) { 2241 entry->wired_count--; 2242 if (VM_MAPENT_ISWIRED(entry) == 0) 2243 uvm_map_entry_unwire(map, entry); 2244 entry = entry->next; 2245 } 2246 if ((lockflags & UVM_LK_EXIT) == 0) 2247 vm_map_unlock(map); 2248 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0); 2249 return(rv); 2250 } 2251 2252 /* We are holding a read lock here. */ 2253 if ((lockflags & UVM_LK_EXIT) == 0) { 2254 vm_map_unbusy(map); 2255 vm_map_unlock_read(map); 2256 } else { 2257 2258 /* 2259 * Get back to an exclusive (write) lock. 
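 * (the caller passed UVM_LK_EXIT, so it expects the map back
 * write-locked; hence the upgrade rather than a plain unlock.)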
2260 */ 2261 2262 vm_map_upgrade(map); 2263 vm_map_unbusy(map); 2264 } 2265 2266 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); 2267 return 0; 2268 } 2269 2270 /* 2271 * uvm_map_pageable_all: special case of uvm_map_pageable - affects 2272 * all mapped regions. 2273 * 2274 * => map must not be locked. 2275 * => if no flags are specified, all regions are unwired. 2276 * => XXXJRT: has some of the same problems as uvm_map_pageable() above. 2277 */ 2278 2279 int 2280 uvm_map_pageable_all(map, flags, limit) 2281 struct vm_map *map; 2282 int flags; 2283 vsize_t limit; 2284 { 2285 struct vm_map_entry *entry, *failed_entry; 2286 vsize_t size; 2287 int rv; 2288 #ifdef DIAGNOSTIC 2289 u_int timestamp_save; 2290 #endif 2291 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist); 2292 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0); 2293 2294 KASSERT(map->flags & VM_MAP_PAGEABLE); 2295 2296 vm_map_lock(map); 2297 2298 /* 2299 * handle wiring and unwiring separately. 2300 */ 2301 2302 if (flags == 0) { /* unwire */ 2303 2304 /* 2305 * POSIX 1003.1b -- munlockall unlocks all regions, 2306 * regardless of how many times mlockall has been called. 2307 */ 2308 2309 for (entry = map->header.next; entry != &map->header; 2310 entry = entry->next) { 2311 if (VM_MAPENT_ISWIRED(entry)) 2312 uvm_map_entry_unwire(map, entry); 2313 } 2314 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); 2315 vm_map_unlock(map); 2316 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 2317 return 0; 2318 } 2319 2320 if (flags & MCL_FUTURE) { 2321 2322 /* 2323 * must wire all future mappings; remember this. 2324 */ 2325 2326 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0); 2327 } 2328 2329 if ((flags & MCL_CURRENT) == 0) { 2330 2331 /* 2332 * no more work to do! 2333 */ 2334 2335 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0); 2336 vm_map_unlock(map); 2337 return 0; 2338 } 2339 2340 /* 2341 * wire case: in three passes [XXXCDC: ugly block of code here] 2342 * 2343 * 1: holding the write lock, count all pages mapped by non-wired 2344 * entries. if this would cause us to go over our limit, we fail. 2345 * 2346 * 2: still holding the write lock, we create any anonymous maps that 2347 * need to be created. then we increment each entry's wiring count. 2348 * 2349 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault 2350 * in the pages for any newly wired area (wired_count == 1). 2351 * 2352 * downgrading to a read lock for uvm_fault_wire avoids a possible 2353 * deadlock with another thread that may have faulted on one of 2354 * the pages to be wired (it would mark the page busy, blocking 2355 * us, then in turn block on the map lock that we hold). because 2356 * of problems in the recursive lock package, we cannot upgrade 2357 * to a write lock in vm_map_lookup. thus, any actions that 2358 * require the write lock must be done beforehand. because we 2359 * keep the read lock on the map, the copy-on-write status of the 2360 * entries we modify here cannot change. 2361 */ 2362 2363 for (size = 0, entry = map->header.next; entry != &map->header; 2364 entry = entry->next) { 2365 if (entry->protection != VM_PROT_NONE && 2366 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired?
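count its pages toward the wiring limit checks below.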
*/ 2367 size += entry->end - entry->start; 2368 } 2369 } 2370 2371 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) { 2372 vm_map_unlock(map); 2373 return ENOMEM; 2374 } 2375 2376 /* XXX non-pmap_wired_count case must be handled by caller */ 2377 #ifdef pmap_wired_count 2378 if (limit != 0 && 2379 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) { 2380 vm_map_unlock(map); 2381 return ENOMEM; 2382 } 2383 #endif 2384 2385 /* 2386 * Pass 2. 2387 */ 2388 2389 for (entry = map->header.next; entry != &map->header; 2390 entry = entry->next) { 2391 if (entry->protection == VM_PROT_NONE) 2392 continue; 2393 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 2394 2395 /* 2396 * perform actions of vm_map_lookup that need the 2397 * write lock on the map: create an anonymous map 2398 * for a copy-on-write region, or an anonymous map 2399 * for a zero-fill region. (XXXCDC: submap case 2400 * ok?) 2401 */ 2402 2403 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 2404 if (UVM_ET_ISNEEDSCOPY(entry) && 2405 ((entry->max_protection & VM_PROT_WRITE) || 2406 (entry->object.uvm_obj == NULL))) { 2407 amap_copy(map, entry, M_WAITOK, TRUE, 2408 entry->start, entry->end); 2409 /* XXXCDC: wait OK? */ 2410 } 2411 } 2412 } 2413 entry->wired_count++; 2414 } 2415 2416 /* 2417 * Pass 3. 2418 */ 2419 2420 #ifdef DIAGNOSTIC 2421 timestamp_save = map->timestamp; 2422 #endif 2423 vm_map_busy(map); 2424 vm_map_downgrade(map); 2425 2426 rv = 0; 2427 for (entry = map->header.next; entry != &map->header; 2428 entry = entry->next) { 2429 if (entry->wired_count == 1) { 2430 rv = uvm_fault_wire(map, entry->start, entry->end, 2431 VM_FAULT_WIREMAX, entry->max_protection); 2432 if (rv) { 2433 2434 /* 2435 * wiring failed. break out of the loop. 2436 * we'll clean up the map below, once we 2437 * have a write lock again. 2438 */ 2439 2440 break; 2441 } 2442 } 2443 } 2444 2445 if (rv) { 2446 2447 /* 2448 * Get back to an exclusive (write) lock. 2449 */ 2450 2451 vm_map_upgrade(map); 2452 vm_map_unbusy(map); 2453 2454 #ifdef DIAGNOSTIC 2455 if (timestamp_save != map->timestamp) 2456 panic("uvm_map_pageable_all: stale map"); 2457 #endif 2458 2459 /* 2460 * first drop the wiring count on all the entries 2461 * which haven't actually been wired yet. 2462 * 2463 * Skip VM_PROT_NONE entries like we did above. 2464 */ 2465 2466 failed_entry = entry; 2467 for (/* nothing */; entry != &map->header; 2468 entry = entry->next) { 2469 if (entry->protection == VM_PROT_NONE) 2470 continue; 2471 entry->wired_count--; 2472 } 2473 2474 /* 2475 * now, unwire all the entries that were successfully 2476 * wired above. 2477 * 2478 * Skip VM_PROT_NONE entries like we did above. 2479 */ 2480 2481 for (entry = map->header.next; entry != failed_entry; 2482 entry = entry->next) { 2483 if (entry->protection == VM_PROT_NONE) 2484 continue; 2485 entry->wired_count--; 2486 if (VM_MAPENT_ISWIRED(entry)) 2487 uvm_map_entry_unwire(map, entry); 2488 } 2489 vm_map_unlock(map); 2490 UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0); 2491 return (rv); 2492 } 2493 2494 /* We are holding a read lock here.
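every entry wired successfully, so just drop the busy marker and the read lock.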
*/ 2495 vm_map_unbusy(map); 2496 vm_map_unlock_read(map); 2497 2498 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); 2499 return 0; 2500 } 2501 2502 /* 2503 * uvm_map_clean: clean out a map range 2504 * 2505 * => valid flags: 2506 * if (flags & PGO_CLEANIT): dirty pages are cleaned first 2507 * if (flags & PGO_SYNCIO): dirty pages are written synchronously 2508 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean 2509 * if (flags & PGO_FREE): any cached pages are freed after clean 2510 * => returns an error if any part of the specified range isn't mapped 2511 * => never a need to flush amap layer since the anonymous memory has 2512 * no permanent home, but may deactivate pages there 2513 * => called from sys_msync() and sys_madvise() 2514 * => caller must not write-lock map (read OK). 2515 * => we may sleep while cleaning if SYNCIO [with map read-locked] 2516 */ 2517 2518 int 2519 uvm_map_clean(map, start, end, flags) 2520 struct vm_map *map; 2521 vaddr_t start, end; 2522 int flags; 2523 { 2524 struct vm_map_entry *current, *entry; 2525 struct uvm_object *uobj; 2526 struct vm_amap *amap; 2527 struct vm_anon *anon; 2528 struct vm_page *pg; 2529 vaddr_t offset; 2530 vsize_t size; 2531 int error, refs; 2532 UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist); 2533 2534 UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)", 2535 map, start, end, flags); 2536 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != 2537 (PGO_FREE|PGO_DEACTIVATE)); 2538 2539 vm_map_lock_read(map); 2540 VM_MAP_RANGE_CHECK(map, start, end); 2541 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) { 2542 vm_map_unlock_read(map); 2543 return EFAULT; 2544 } 2545 2546 /* 2547 * Make a first pass to check for holes. 2548 */ 2549 2550 for (current = entry; current->start < end; current = current->next) { 2551 if (UVM_ET_ISSUBMAP(current)) { 2552 vm_map_unlock_read(map); 2553 return EINVAL; 2554 } 2555 if (end <= current->end) { 2556 break; 2557 } 2558 if (current->end != current->next->start) { 2559 vm_map_unlock_read(map); 2560 return EFAULT; 2561 } 2562 } 2563 2564 error = 0; 2565 for (current = entry; start < end; current = current->next) { 2566 amap = current->aref.ar_amap; /* top layer */ 2567 uobj = current->object.uvm_obj; /* bottom layer */ 2568 KASSERT(start >= current->start); 2569 2570 /* 2571 * No amap cleaning necessary if: 2572 * 2573 * (1) There's no amap. 2574 * 2575 * (2) We're not deactivating or freeing pages. 2576 */ 2577 2578 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) 2579 goto flush_object; 2580 2581 amap_lock(amap); 2582 offset = start - current->start; 2583 size = MIN(end, current->end) - start; 2584 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) { 2585 anon = amap_lookup(&current->aref, offset); 2586 if (anon == NULL) 2587 continue; 2588 2589 simple_lock(&anon->an_lock); 2590 pg = anon->u.an_page; 2591 if (pg == NULL) { 2592 simple_unlock(&anon->an_lock); 2593 continue; 2594 } 2595 2596 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) { 2597 2598 /* 2599 * In these first 3 cases, we just deactivate the page. 2600 */ 2601 2602 case PGO_CLEANIT|PGO_FREE: 2603 case PGO_CLEANIT|PGO_DEACTIVATE: 2604 case PGO_DEACTIVATE: 2605 deactivate_it: 2606 /* 2607 * skip the page if it's loaned or wired, 2608 * since it shouldn't be on a paging queue 2609 * at all in these cases.
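 * (a loaned page belongs, for the moment, to another owner and a
 * wired page is pinned in memory; neither sits on the active or
 * inactive queue, so there is nothing to deactivate)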
2610 */ 2611 2612 uvm_lock_pageq(); 2613 if (pg->loan_count != 0 || 2614 pg->wire_count != 0) { 2615 uvm_unlock_pageq(); 2616 simple_unlock(&anon->an_lock); 2617 continue; 2618 } 2619 KASSERT(pg->uanon == anon); 2620 pmap_clear_reference(pg); 2621 uvm_pagedeactivate(pg); 2622 uvm_unlock_pageq(); 2623 simple_unlock(&anon->an_lock); 2624 continue; 2625 2626 case PGO_FREE: 2627 2628 /* 2629 * If there are multiple references to 2630 * the amap, just deactivate the page. 2631 */ 2632 2633 if (amap_refs(amap) > 1) 2634 goto deactivate_it; 2635 2636 /* skip the page if it's wired */ 2637 if (pg->wire_count != 0) { 2638 simple_unlock(&anon->an_lock); 2639 continue; 2640 } 2641 amap_unadd(&current->aref, offset); 2642 refs = --anon->an_ref; 2643 simple_unlock(&anon->an_lock); 2644 if (refs == 0) 2645 uvm_anfree(anon); 2646 continue; 2647 } 2648 } 2649 amap_unlock(amap); 2650 2651 flush_object: 2652 /* 2653 * flush pages if we've got a valid backing object. 2654 * note that we must always clean object pages before 2655 * freeing them since otherwise we could reveal stale 2656 * data from files. 2657 */ 2658 2659 offset = current->offset + (start - current->start); 2660 size = MIN(end, current->end) - start; 2661 if (uobj != NULL) { 2662 simple_lock(&uobj->vmobjlock); 2663 error = (uobj->pgops->pgo_put)(uobj, offset, 2664 offset + size, flags | PGO_CLEANIT); 2665 } 2666 start += size; 2667 } 2668 vm_map_unlock_read(map); 2669 return (error); 2670 } 2671 2672 2673 /* 2674 * uvm_map_checkprot: check protection in map 2675 * 2676 * => must allow specified protection in a fully allocated region. 2677 * => map must be read or write locked by caller. 2678 */ 2679 2680 boolean_t 2681 uvm_map_checkprot(map, start, end, protection) 2682 struct vm_map * map; 2683 vaddr_t start, end; 2684 vm_prot_t protection; 2685 { 2686 struct vm_map_entry *entry; 2687 struct vm_map_entry *tmp_entry; 2688 2689 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { 2690 return(FALSE); 2691 } 2692 entry = tmp_entry; 2693 while (start < end) { 2694 if (entry == &map->header) { 2695 return(FALSE); 2696 } 2697 2698 /* 2699 * no holes allowed 2700 */ 2701 2702 if (start < entry->start) { 2703 return(FALSE); 2704 } 2705 2706 /* 2707 * check protection associated with entry 2708 */ 2709 2710 if ((entry->protection & protection) != protection) { 2711 return(FALSE); 2712 } 2713 start = entry->end; 2714 entry = entry->next; 2715 } 2716 return(TRUE); 2717 } 2718 2719 /* 2720 * uvmspace_alloc: allocate a vmspace structure. 2721 * 2722 * - structure includes vm_map and pmap 2723 * - XXX: no locking on this structure 2724 * - refcnt set to 1, rest must be init'd by caller 2725 */ 2726 struct vmspace * 2727 uvmspace_alloc(min, max) 2728 vaddr_t min, max; 2729 { 2730 struct vmspace *vm; 2731 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist); 2732 2733 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK); 2734 uvmspace_init(vm, NULL, min, max); 2735 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0); 2736 return (vm); 2737 } 2738 2739 /* 2740 * uvmspace_init: initialize a vmspace structure.
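 *   (uvmspace_alloc() above is just pool_get() on uvm_vmspace_pool
 *   followed by this routine; passing pmap == NULL asks for a fresh
 *   pmap via pmap_create())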
2741 * 2742 * - XXX: no locking on this structure 2743 * - refcnt set to 1, rest must be init'd by caller 2744 */ 2745 void 2746 uvmspace_init(vm, pmap, min, max) 2747 struct vmspace *vm; 2748 struct pmap *pmap; 2749 vaddr_t min, max; 2750 { 2751 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); 2752 2753 memset(vm, 0, sizeof(*vm)); 2754 uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE); 2755 if (pmap) 2756 pmap_reference(pmap); 2757 else 2758 pmap = pmap_create(); 2759 vm->vm_map.pmap = pmap; 2760 vm->vm_refcnt = 1; 2761 UVMHIST_LOG(maphist,"<- done",0,0,0,0); 2762 } 2763 2764 /* 2765 * uvmspace_share: share a vmspace between two processes 2766 * 2767 * - XXX: no locking on vmspace 2768 * - used for vfork, threads(?) 2769 */ 2770 2771 void 2772 uvmspace_share(p1, p2) 2773 struct proc *p1, *p2; 2774 { 2775 p2->p_vmspace = p1->p_vmspace; 2776 p1->p_vmspace->vm_refcnt++; 2777 } 2778 2779 /* 2780 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace 2781 * 2782 * - XXX: no locking on vmspace 2783 */ 2784 2785 void 2786 uvmspace_unshare(p) 2787 struct proc *p; 2788 { 2789 struct vmspace *nvm, *ovm = p->p_vmspace; 2790 2791 if (ovm->vm_refcnt == 1) 2792 /* nothing to do: vmspace isn't shared in the first place */ 2793 return; 2794 2795 /* make a new vmspace, still holding old one */ 2796 nvm = uvmspace_fork(ovm); 2797 2798 pmap_deactivate(p); /* unbind old vmspace */ 2799 p->p_vmspace = nvm; 2800 pmap_activate(p); /* switch to new vmspace */ 2801 2802 uvmspace_free(ovm); /* drop reference to old vmspace */ 2803 } 2804 2805 /* 2806 * uvmspace_exec: the process wants to exec a new program 2807 * 2808 * - XXX: no locking on vmspace 2809 */ 2810 2811 void 2812 uvmspace_exec(p, start, end) 2813 struct proc *p; 2814 vaddr_t start, end; 2815 { 2816 struct vmspace *nvm, *ovm = p->p_vmspace; 2817 struct vm_map *map = &ovm->vm_map; 2818 2819 #ifdef __sparc__ 2820 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */ 2821 kill_user_windows(p); /* before stack addresses go away */ 2822 #endif 2823 2824 /* 2825 * see if more than one process is using this vmspace... 2826 */ 2827 2828 if (ovm->vm_refcnt == 1) { 2829 2830 /* 2831 * if p is the only process using its vmspace then we can safely 2832 * recycle that vmspace for the program that is being exec'd. 2833 */ 2834 2835 #ifdef SYSVSHM 2836 /* 2837 * SYSV SHM semantics require us to kill all segments on an exec 2838 */ 2839 2840 if (ovm->vm_shm) 2841 shmexit(ovm); 2842 #endif 2843 2844 /* 2845 * POSIX 1003.1b -- "lock future mappings" is revoked 2846 * when a process execs another program image. 2847 */ 2848 2849 vm_map_lock(map); 2850 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); 2851 vm_map_unlock(map); 2852 2853 /* 2854 * now unmap the old program 2855 */ 2856 2857 uvm_unmap(map, map->min_offset, map->max_offset); 2858 2859 /* 2860 * resize the map 2861 */ 2862 2863 vm_map_lock(map); 2864 map->min_offset = start; 2865 map->max_offset = end; 2866 vm_map_unlock(map); 2867 } else { 2868 2869 /* 2870 * p's vmspace is being shared, so we can't reuse it for p since 2871 * it is still being used for others. allocate a new vmspace 2872 * for p 2873 */ 2874 2875 nvm = uvmspace_alloc(start, end); 2876 2877 /* 2878 * install new vmspace and drop our ref to the old one.
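 * the ordering matters: deactivate the old pmap, switch
 * p_vmspace, activate the new pmap, and only then drop the old
 * reference (uvmspace_free() may destroy the old vmspace outright).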
2879 */ 2880 2881 pmap_deactivate(p); 2882 p->p_vmspace = nvm; 2883 pmap_activate(p); 2884 2885 uvmspace_free(ovm); 2886 } 2887 } 2888 2889 /* 2890 * uvmspace_free: free a vmspace data structure 2891 * 2892 * - XXX: no locking on vmspace 2893 */ 2894 2895 void 2896 uvmspace_free(vm) 2897 struct vmspace *vm; 2898 { 2899 struct vm_map_entry *dead_entries; 2900 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); 2901 2902 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); 2903 if (--vm->vm_refcnt == 0) { 2904 2905 /* 2906 * lock the map, to wait out all other references to it. delete 2907 * all of the mappings and pages they hold, then call the pmap 2908 * module to reclaim anything left. 2909 */ 2910 2911 #ifdef SYSVSHM 2912 /* Get rid of any SYSV shared memory segments. */ 2913 if (vm->vm_shm != NULL) 2914 shmexit(vm); 2915 #endif 2916 vm_map_lock(&vm->vm_map); 2917 if (vm->vm_map.nentries) { 2918 uvm_unmap_remove(&vm->vm_map, 2919 vm->vm_map.min_offset, vm->vm_map.max_offset, 2920 &dead_entries); 2921 if (dead_entries != NULL) 2922 uvm_unmap_detach(dead_entries, 0); 2923 } 2924 pmap_destroy(vm->vm_map.pmap); 2925 vm->vm_map.pmap = NULL; 2926 pool_put(&uvm_vmspace_pool, vm); 2927 } 2928 UVMHIST_LOG(maphist,"<- done", 0,0,0,0); 2929 } 2930 2931 /* 2932 * F O R K - m a i n e n t r y p o i n t 2933 */ 2934 /* 2935 * uvmspace_fork: fork a process' main map 2936 * 2937 * => create a new vmspace for child process from parent. 2938 * => parent's map must not be locked. 2939 */ 2940 2941 struct vmspace * 2942 uvmspace_fork(vm1) 2943 struct vmspace *vm1; 2944 { 2945 struct vmspace *vm2; 2946 struct vm_map *old_map = &vm1->vm_map; 2947 struct vm_map *new_map; 2948 struct vm_map_entry *old_entry; 2949 struct vm_map_entry *new_entry; 2950 pmap_t new_pmap; 2951 UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist); 2952 2953 vm_map_lock(old_map); 2954 2955 vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset); 2956 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy, 2957 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); 2958 new_map = &vm2->vm_map; /* XXX */ 2959 new_pmap = new_map->pmap; 2960 2961 old_entry = old_map->header.next; 2962 2963 /* 2964 * go entry-by-entry 2965 */ 2966 2967 while (old_entry != &old_map->header) { 2968 2969 /* 2970 * first, some sanity checks on the old entry 2971 */ 2972 2973 KASSERT(!UVM_ET_ISSUBMAP(old_entry)); 2974 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) || 2975 !UVM_ET_ISNEEDSCOPY(old_entry)); 2976 2977 switch (old_entry->inheritance) { 2978 case MAP_INHERIT_NONE: 2979 2980 /* 2981 * drop the mapping 2982 */ 2983 2984 break; 2985 2986 case MAP_INHERIT_SHARE: 2987 2988 /* 2989 * share the mapping: this means we want the old and 2990 * new entries to share amaps and backing objects. 2991 */ 2992 /* 2993 * if the old_entry needs a new amap (due to prev fork) 2994 * then we need to allocate it now so that we have 2995 * something we own to share with the new_entry. [in 2996 * other words, we need to clear needs_copy] 2997 */ 2998 2999 if (UVM_ET_ISNEEDSCOPY(old_entry)) { 3000 /* get our own amap, clears needs_copy */ 3001 amap_copy(old_map, old_entry, M_WAITOK, FALSE, 3002 0, 0); 3003 /* XXXCDC: WAITOK??? */ 3004 } 3005 3006 new_entry = uvm_mapent_alloc(new_map); 3007 /* old_entry -> new_entry */ 3008 uvm_mapent_copy(old_entry, new_entry); 3009 3010 /* new pmap has nothing wired in it */ 3011 new_entry->wired_count = 0; 3012 3013 /* 3014 * gain reference to object backing the map (can't 3015 * be a submap, already checked this case). 
3016 */ 3017 3018 if (new_entry->aref.ar_amap) 3019 uvm_map_reference_amap(new_entry, AMAP_SHARED); 3020 3021 if (new_entry->object.uvm_obj && 3022 new_entry->object.uvm_obj->pgops->pgo_reference) 3023 new_entry->object.uvm_obj-> 3024 pgops->pgo_reference( 3025 new_entry->object.uvm_obj); 3026 3027 /* insert entry at end of new_map's entry list */ 3028 uvm_map_entry_link(new_map, new_map->header.prev, 3029 new_entry); 3030 3031 break; 3032 3033 case MAP_INHERIT_COPY: 3034 3035 /* 3036 * copy-on-write the mapping (using mmap's 3037 * MAP_PRIVATE semantics) 3038 * 3039 * allocate new_entry, adjust reference counts. 3040 * (note that new references are read-only). 3041 */ 3042 3043 new_entry = uvm_mapent_alloc(new_map); 3044 /* old_entry -> new_entry */ 3045 uvm_mapent_copy(old_entry, new_entry); 3046 3047 if (new_entry->aref.ar_amap) 3048 uvm_map_reference_amap(new_entry, 0); 3049 3050 if (new_entry->object.uvm_obj && 3051 new_entry->object.uvm_obj->pgops->pgo_reference) 3052 new_entry->object.uvm_obj->pgops->pgo_reference 3053 (new_entry->object.uvm_obj); 3054 3055 /* new pmap has nothing wired in it */ 3056 new_entry->wired_count = 0; 3057 3058 new_entry->etype |= 3059 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY); 3060 uvm_map_entry_link(new_map, new_map->header.prev, 3061 new_entry); 3062 3063 /* 3064 * the new entry will need an amap. it will either 3065 * need to be copied from the old entry or created 3066 * from scratch (if the old entry does not have an 3067 * amap). can we defer this process until later 3068 * (by setting "needs_copy") or do we need to copy 3069 * the amap now? 3070 * 3071 * we must copy the amap now if any of the following 3072 * conditions hold: 3073 * 1. the old entry has an amap and that amap is 3074 * being shared. this means that the old (parent) 3075 * process is sharing the amap with another 3076 * process. if we do not clear needs_copy here 3077 * we will end up in a situation where both the 3078 * parent and child process are referring to the 3079 * same amap with "needs_copy" set. if the 3080 * parent write-faults, the fault routine will 3081 * clear "needs_copy" in the parent by allocating 3082 * a new amap. this is wrong because the 3083 * parent is supposed to be sharing the old amap 3084 * and the new amap will break that. 3085 * 3086 * 2. if the old entry has an amap and a non-zero 3087 * wire count then we are going to have to call 3088 * amap_cow_now to avoid page faults in the 3089 * parent process. since amap_cow_now requires 3090 * "needs_copy" to be clear we might as well 3091 * clear it here as well. 3092 * 3093 */ 3094 3095 if (old_entry->aref.ar_amap != NULL) { 3096 if ((amap_flags(old_entry->aref.ar_amap) & 3097 AMAP_SHARED) != 0 || 3098 VM_MAPENT_ISWIRED(old_entry)) { 3099 3100 amap_copy(new_map, new_entry, M_WAITOK, 3101 FALSE, 0, 0); 3102 /* XXXCDC: M_WAITOK ... ok? */ 3103 } 3104 } 3105 3106 /* 3107 * if the parent's entry is wired down, then the 3108 * parent process does not want page faults on 3109 * access to that memory. this means that we 3110 * cannot do copy-on-write because we can't write 3111 * protect the old entry. in this case we 3112 * resolve all copy-on-write faults now, using 3113 * amap_cow_now. note that we have already 3114 * allocated any needed amap (above). 3115 */ 3116 3117 if (VM_MAPENT_ISWIRED(old_entry)) { 3118 3119 /* 3120 * resolve all copy-on-write faults now 3121 * (note that there is nothing to do if 3122 * the old mapping does not have an amap).
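 * amap_cow_now() performs the copy immediately instead of
 * write-protecting the pages and waiting for faults, which the
 * wired parent must never take.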
3123 */ 3124 if (old_entry->aref.ar_amap) 3125 amap_cow_now(new_map, new_entry); 3126 3127 } else { 3128 3129 /* 3130 * set up mappings to trigger copy-on-write faults. 3131 * we must write-protect the parent if it has 3132 * an amap and it is not already "needs_copy"... 3133 * if it is already "needs_copy" then the parent 3134 * has already been write-protected by a previous 3135 * fork operation. 3136 */ 3137 3138 if (old_entry->aref.ar_amap && 3139 !UVM_ET_ISNEEDSCOPY(old_entry)) { 3140 if (old_entry->max_protection & VM_PROT_WRITE) { 3141 pmap_protect(old_map->pmap, 3142 old_entry->start, 3143 old_entry->end, 3144 old_entry->protection & 3145 ~VM_PROT_WRITE); 3146 pmap_update(old_map->pmap); 3147 } 3148 old_entry->etype |= UVM_ET_NEEDSCOPY; 3149 } 3150 } 3151 break; 3152 } /* end of switch statement */ 3153 old_entry = old_entry->next; 3154 } 3155 3156 new_map->size = old_map->size; 3157 vm_map_unlock(old_map); 3158 3159 #ifdef SYSVSHM 3160 if (vm1->vm_shm) 3161 shmfork(vm1, vm2); 3162 #endif 3163 3164 #ifdef PMAP_FORK 3165 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap); 3166 #endif 3167 3168 UVMHIST_LOG(maphist,"<- done",0,0,0,0); 3169 return(vm2); 3170 } 3171 3172 3173 #if defined(DDB) 3174 3175 /* 3176 * DDB hooks 3177 */ 3178 3179 /* 3180 * uvm_map_printit: actually prints the map 3181 */ 3182 3183 void 3184 uvm_map_printit(map, full, pr) 3185 struct vm_map *map; 3186 boolean_t full; 3187 void (*pr) __P((const char *, ...)); 3188 { 3189 struct vm_map_entry *entry; 3190 3191 (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset); 3192 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n", 3193 map->nentries, map->size, map->ref_count, map->timestamp, 3194 map->flags); 3195 (*pr)("\tpmap=%p(resident=%d)\n", map->pmap, 3196 pmap_resident_count(map->pmap)); 3197 if (!full) 3198 return; 3199 for (entry = map->header.next; entry != &map->header; 3200 entry = entry->next) { 3201 (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n", 3202 entry, entry->start, entry->end, entry->object.uvm_obj, 3203 (long long)entry->offset, entry->aref.ar_amap, 3204 entry->aref.ar_pageoff); 3205 (*pr)( 3206 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, " 3207 "wc=%d, adv=%d\n", 3208 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F', 3209 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F', 3210 (entry->etype & UVM_ET_NEEDSCOPY) ?
'T' : 'F', 3211 entry->protection, entry->max_protection, 3212 entry->inheritance, entry->wired_count, entry->advice); 3213 } 3214 } 3215 3216 /* 3217 * uvm_object_printit: actually prints the object 3218 */ 3219 3220 void 3221 uvm_object_printit(uobj, full, pr) 3222 struct uvm_object *uobj; 3223 boolean_t full; 3224 void (*pr) __P((const char *, ...)); 3225 { 3226 struct vm_page *pg; 3227 int cnt = 0; 3228 3229 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ", 3230 uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages); 3231 if (UVM_OBJ_IS_KERN_OBJECT(uobj)) 3232 (*pr)("refs=<SYSTEM>\n"); 3233 else 3234 (*pr)("refs=%d\n", uobj->uo_refs); 3235 3236 if (!full) { 3237 return; 3238 } 3239 (*pr)(" PAGES <pg,offset>:\n "); 3240 TAILQ_FOREACH(pg, &uobj->memq, listq) { 3241 cnt++; 3242 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset); 3243 if ((cnt % 3) == 0) { 3244 (*pr)("\n "); 3245 } 3246 } 3247 if ((cnt % 3) != 0) { 3248 (*pr)("\n"); 3249 } 3250 } 3251 3252 /* 3253 * uvm_page_printit: actually print the page 3254 */ 3255 3256 static const char page_flagbits[] = 3257 "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" 3258 "\11ZERO\15PAGER1"; 3259 static const char page_pqflagbits[] = 3260 "\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ"; 3261 3262 void 3263 uvm_page_printit(pg, full, pr) 3264 struct vm_page *pg; 3265 boolean_t full; 3266 void (*pr) __P((const char *, ...)); 3267 { 3268 struct vm_page *tpg; 3269 struct uvm_object *uobj; 3270 struct pglist *pgl; 3271 char pgbuf[128]; 3272 char pqbuf[128]; 3273 3274 (*pr)("PAGE %p:\n", pg); 3275 bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf)); 3276 bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf)); 3277 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n", 3278 pgbuf, pqbuf, pg->wire_count, (long)pg->phys_addr); 3279 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n", 3280 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count); 3281 #if defined(UVM_PAGE_TRKOWN) 3282 if (pg->flags & PG_BUSY) 3283 (*pr)(" owning process = %d, tag=%s\n", 3284 pg->owner, pg->owner_tag); 3285 else 3286 (*pr)(" page not busy, no owner\n"); 3287 #else 3288 (*pr)(" [page ownership tracking disabled]\n"); 3289 #endif 3290 3291 if (!full) 3292 return; 3293 3294 /* cross-verify object/anon */ 3295 if ((pg->pqflags & PQ_FREE) == 0) { 3296 if (pg->pqflags & PQ_ANON) { 3297 if (pg->uanon == NULL || pg->uanon->u.an_page != pg) 3298 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n", 3299 (pg->uanon) ? pg->uanon->u.an_page : NULL); 3300 else 3301 (*pr)(" anon backpointer is OK\n"); 3302 } else { 3303 uobj = pg->uobject; 3304 if (uobj) { 3305 (*pr)(" checking object list\n"); 3306 TAILQ_FOREACH(tpg, &uobj->memq, listq) { 3307 if (tpg == pg) { 3308 break; 3309 } 3310 } 3311 if (tpg) 3312 (*pr)(" page found on object list\n"); 3313 else 3314 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n"); 3315 } 3316 } 3317 } 3318 3319 /* cross-verify page queue */ 3320 if (pg->pqflags & PQ_FREE) { 3321 int fl = uvm_page_lookup_freelist(pg); 3322 int color = VM_PGCOLOR_BUCKET(pg); 3323 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[ 3324 ((pg)->flags & PG_ZERO) ? 
PGFL_ZEROS : PGFL_UNKNOWN]; 3325 } else if (pg->pqflags & PQ_INACTIVE) { 3326 pgl = &uvm.page_inactive; 3327 } else if (pg->pqflags & PQ_ACTIVE) { 3328 pgl = &uvm.page_active; 3329 } else { 3330 pgl = NULL; 3331 } 3332 3333 if (pgl) { 3334 (*pr)(" checking pageq list\n"); 3335 TAILQ_FOREACH(tpg, pgl, pageq) { 3336 if (tpg == pg) { 3337 break; 3338 } 3339 } 3340 if (tpg) 3341 (*pr)(" page found on pageq list\n"); 3342 else 3343 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n"); 3344 } 3345 } 3346 #endif 3347
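
/*
 * For reference, a hedged sketch (not part of this file): with
 * "options DDB" configured, the hooks above back the kernel
 * debugger's map, object and page inspection commands, e.g.
 * something like
 *
 *	db> show map/f <address of a struct vm_map>
 *
 * would end up in uvm_map_printit(map, TRUE, db_printf).
 */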