/*	$OpenBSD: uvm_map.c,v 1.36 2002/01/02 22:23:25 miod Exp $	*/
/*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define UVM_MAP
#include <uvm/uvm.h>

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif


struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";

/*
 * pool for vmspace structures.
 */

struct pool uvm_vmspace_pool;

/*
 * pool for dynamically-allocated map entries.
 */

struct pool uvm_map_entry_pool;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
} while (0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
} while (0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (0)

/*
 * local prototypes
 */

static vm_map_entry_t	uvm_mapent_alloc __P((vm_map_t));
static void		uvm_mapent_copy __P((vm_map_entry_t, vm_map_entry_t));
static void		uvm_mapent_free __P((vm_map_entry_t));
static void		uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void		uvm_map_reference_amap __P((vm_map_entry_t, int));
static void		uvm_map_unreference_amap __P((vm_map_entry_t, int));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 *
 * => XXX: static pool for kernel map?
 */
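
/*
 * note: entries for kernel_map and for intrsafe maps are taken from the
 * static kentry_free list (set up in uvm_map_init() below) rather than
 * from uvm_map_entry_pool; a dynamic pool_get() at that point could
 * sleep, or recurse back into the very map being operated on.
 */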

static __inline vm_map_entry_t
uvm_mapent_alloc(map)
	vm_map_t map;
{
	vm_map_entry_t me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc");
	UVMHIST_CALLED(maphist);

	if ((map->flags & VM_MAP_INTRSAFE) == 0 &&
	    map != kernel_map && kernel_map != NULL /* XXX */) {
		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
		me->flags = 0;
		/* me can't be null, wait ok */
	} else {
		s = splvm();	/* protect kentry_free list with splvm */
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me)
			uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (!me)
			panic("mapent_alloc: out of static map entries, check MAX_KMAPENT");
		me->flags = UVM_MAP_STATIC;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]",
	    me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map)
	    ? TRUE : FALSE, 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 *
 * => XXX: static pool for kernel map?
 */

static __inline void
uvm_mapent_free(me)
	vm_map_entry_t me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free");
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
	    me, me->flags, 0, 0);
	if ((me->flags & UVM_MAP_STATIC) == 0) {
		pool_put(&uvm_map_entry_pool, me);
	} else {
		s = splvm();	/* protect kentry_free list with splvm */
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static __inline void
uvm_mapent_copy(src, dst)
	vm_map_entry_t src;
	vm_map_entry_t dst;
{

	memcpy(dst, src,
	    ((char *)&src->uvm_map_entry_stop_copy) - ((char *)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static __inline void
uvm_map_reference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static __inline void
uvm_map_unreference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of vm_map_entry_t's for the kernel here.
 */

void
uvm_map_init()
{
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call, UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
	    0);
	UVMCNT_INIT(uvm_mlk_call, UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entries ...
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}

	/*
	 * initialize the map-related pools.
	 */
	pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */
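
/*
 * illustration (added here for reference, not from the original text):
 * clipping the entry [A.......B) at address S yields two entries,
 *
 *	[A...S)[S...B)
 *
 * with the front piece inserted before the original entry and the amap
 * and object references split/duplicated so both halves stay backed.
 */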

void
uvm_map_clip_start(map, entry, start)
	vm_map_t map;
	vm_map_entry_t entry;
	vaddr_t start;
{
	vm_map_entry_t new_entry;
	vaddr_t new_adj;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	vm_map_t map;
	vm_map_entry_t entry;
	vaddr_t end;
{
	vm_map_entry_t new_entry;
	vaddr_t new_adj;	/* #bytes we move start forward */

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}


/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we try to align the virtual address to
 *	the specified alignment.  this is only a hint; if we can't
 *	do it, the address will be unaligned.  this is provided as
 *	a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */
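
/*
 * example (a sketch modeled on the uvm_map_reserve() call later in this
 * file; flag choices depend on the caller): mapping anonymous pageable
 * space via case [2] above (uobj == NULL, no PMAP_PREFER hint) might
 * look like
 *
 *	vaddr_t va = vm_map_min(map);
 *	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS)
 *		... handle failure ...
 */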

int
uvm_map(map, startp, size, uobj, uoffset, align, flags)
	vm_map_t map;
	vaddr_t *startp;	/* IN/OUT */
	vsize_t size;
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	uvm_flag_t flags;
{
	vm_map_entry_t prev_entry, new_entry;
	vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
	    UVM_MAXPROTECTION(flags);
	vm_inherit_t inherit = UVM_INHERIT(flags);
	int advice = UVM_ADVICE(flags);
	UVMHIST_FUNC("uvm_map");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
	    map, *startp, size, flags);
	UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);

	/*
	 * step 0: sanity check of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
		    prot, maxprot,0,0);
		return(KERN_PROTECTION_FAILURE);
	}

	/*
	 * step 1: figure out where to put new VM range
	 */

	if (vm_map_lock_try(map) == FALSE) {
		if (flags & UVM_FLAG_TRYLOCK)
			return(KERN_FAILURE);
		vm_map_lock(map); /* could sleep here */
	}
	if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
	    uobj, uoffset, align, flags)) == NULL) {
		UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

#ifdef PMAP_GROWKERNEL
	{
		/*
		 * If the kernel pmap can't map the requested space,
		 * then allocate more resources for it.
		 */
		if (map == kernel_map && uvm_maxkaddr < (*startp + size))
			uvm_maxkaddr = pmap_growkernel(*startp + size);
	}
#endif

	UVMCNT_INCR(uvm_map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).   this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).   the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = *startp - vm_map_min(kernel_map);
		}
	}

	/*
	 * step 2: try and insert in map by extending previous entry, if
	 * possible
	 * XXX: we don't try and pull back the next entry.   might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->end == *startp && prev_entry != &map->header &&
	    prev_entry->object.uvm_obj == uobj) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto step3;

		if (UVM_ET_ISSUBMAP(prev_entry))
			goto step3;

		if (prev_entry->protection != prot ||
		    prev_entry->max_protection != maxprot)
			goto step3;

		if (prev_entry->inheritance != inherit ||
		    prev_entry->advice != advice)
			goto step3;

		/* wiring status must match (new area is unwired) */
		if (VM_MAPENT_ISWIRED(prev_entry))
			goto step3;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will
		 * stay there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto step3;
		}

		/* got it! */

		UVMCNT_INCR(map_backmerge);
		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a
		 * reference that we already have (the ref count cannot
		 * drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (prev_entry->aref.ar_amap) {
			amap_extend(prev_entry, size);
		}

		prev_entry->end += size;
		map->size += size;

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		vm_map_unlock(map);
		return (KERN_SUCCESS);

	}
step3:
	UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);

	/*
	 * check for possible forward merge (which we don't do) and count
	 * the number of times we missed a *possible* chance to merge more
	 */

	if ((flags & UVM_FLAG_NOMERGE) == 0 &&
	    prev_entry->next != &map->header &&
	    prev_entry->next->start == (*startp + size))
		UVMCNT_INCR(map_forwmerge);

	/*
	 * step 3: allocate new entry and link it in
	 */

	new_entry = uvm_mapent_alloc(map);
	new_entry->start = *startp;
	new_entry->end = new_entry->start + size;
	new_entry->object.uvm_obj = uobj;
	new_entry->offset = uoffset;

	if (uobj)
		new_entry->etype = UVM_ET_OBJ;
	else
		new_entry->etype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		new_entry->etype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			new_entry->etype |= UVM_ET_NEEDSCOPY;
	}

	new_entry->protection = prot;
	new_entry->max_protection = maxprot;
	new_entry->inheritance = inherit;
	new_entry->wired_count = 0;
	new_entry->advice = advice;
	if (flags & UVM_FLAG_OVERLAY) {
		/*
		 * to_add: for BSS we overallocate a little since we
		 * are likely to extend
		 */
		vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
		    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
		struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = amap;
	} else {
		new_entry->aref.ar_pageoff = 0;
		new_entry->aref.ar_amap = NULL;
	}

	uvm_map_entry_link(map, prev_entry, new_entry);

	map->size += size;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

boolean_t
uvm_map_lookup_entry(map, address, entry)
	vm_map_t map;
	vaddr_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	vm_map_entry_t cur;
	vm_map_entry_t last;
	UVMHIST_FUNC("uvm_map_lookup_entry");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
	    map, address, entry, 0);

	/*
	 * start looking either from the head of the
	 * list, or from the hint.
	 */

	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	UVMCNT_INCR(uvm_mlk_call);
	if (address >= cur->start) {
		/*
		 * go from hint to end of list.
		 *
		 * but first, make a quick check to see if
		 * we are already looking at the entry we
		 * want (which is usually the case).
		 * note also that we don't need to save the hint
		 * here... it is the same hint (unless we are
		 * at the header, in which case the hint didn't
		 * buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			UVMCNT_INCR(uvm_mlk_hint);
			*entry = cur;
			UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
			    cur, 0, 0, 0);
			return (TRUE);
		}
	} else {
		/*
		 * go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * save this lookup for future
				 * hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, map->hint, cur);
				UVMHIST_LOG(maphist,"<- search got it (0x%x)",
				    cur, 0, 0, 0);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	return (FALSE);
}

/*
 * uvm_map_findspace: find "length" sized space in "map".
 *
 * => "hint" is a hint about where we want it, unless FINDSPACE_FIXED is
 *	set (in which case we insist on using "hint").
 * => "result" is VA returned
 * => uobj/uoffset are to be used to handle VAC alignment, if required
 * => if `align' is non-zero, we attempt to align to that value.
 * => caller must at least have read-locked map
 * => returns NULL on failure, or pointer to prev. map entry if success
 * => note this is a cross between the old vm_map_findspace and
 *	vm_map_find
 */
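
/*
 * note on alignment (see the search loop below): the alignment request
 * is best-effort.  if the aligned search runs off the end of the map,
 * the function calls itself once more with align == 0, so callers can
 * get back an unaligned address rather than a failure.
 */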

vm_map_entry_t
uvm_map_findspace(map, hint, length, result, uobj, uoffset, align, flags)
	vm_map_t map;
	vaddr_t hint;
	vsize_t length;
	vaddr_t *result;	/* OUT */
	struct uvm_object *uobj;
	voff_t uoffset;
	vsize_t align;
	int flags;
{
	vm_map_entry_t entry, next, tmp;
	vaddr_t end, orig_hint;
	UVMHIST_FUNC("uvm_map_findspace");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
	    map, hint, length, flags);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * remember the original hint.  if we are aligning, then we
	 * may have to try again with no alignment constraint if
	 * we fail the first time.
	 */

	orig_hint = hint;
	if (hint < map->min_offset) {	/* check ranges ... */
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return(NULL);
		}
		hint = map->min_offset;
	}
	if (hint > map->max_offset) {
		UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
		    hint, map->min_offset, map->max_offset, 0);
		return(NULL);
	}

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			hint = entry->end;
	} else {
		if (uvm_map_lookup_entry(map, hint, &tmp)) {
			/* "hint" address already in use ... */
			if (flags & UVM_FLAG_FIXED) {
				UVMHIST_LOG(maphist,"<- fixed & VA in use",
				    0, 0, 0, 0);
				return(NULL);
			}
			hint = tmp->end;
		}
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end  = base VA of current gap,
	 *	 next->start = VA of end of current gap
	 */
	for (;; hint = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */

#ifdef PMAP_PREFER
		/*
		 * push hint forward as needed to avoid VAC alias problems.
		 * we only do this if a valid offset is specified.
		 */
		if ((flags & UVM_FLAG_FIXED) == 0 &&
		    uoffset != UVM_UNKNOWN_OFFSET)
			PMAP_PREFER(uoffset, &hint);
#endif
		if (align != 0) {
			if ((hint & (align - 1)) != 0)
				hint = roundup(hint, align);
			/*
			 * XXX Should we PMAP_PREFER() here again?
			 */
		}
		end = hint + length;
		if (end > map->max_offset || end < hint) {
			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
			if (align != 0) {
				UVMHIST_LOG(maphist,
				    "calling recursively, no align",
				    0,0,0,0);
				return (uvm_map_findspace(map, orig_hint,
				    length, result, uobj, uoffset, 0, flags));
			}
			return (NULL);
		}
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
			return(NULL); /* only one shot at it ... */
		}
	}
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	return (entry);
}
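
/*
 * usage sketch (cf. uvm_unmap(), which wraps this pair): removal is a
 * two-phase protocol so that pager I/O never happens with the map locked:
 *
 *	vm_map_entry_t dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */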

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *	in "entry_list"
 */

int
uvm_unmap_remove(map, start, end, entry_list)
	vm_map_t map;
	vaddr_t start, end;
	vm_map_entry_t *entry_list;	/* OUT */
{
	vm_map_entry_t entry, first_entry, next;
	vaddr_t len;
	UVMHIST_FUNC("uvm_unmap_remove");
	UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * find first entry
	 */
	if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
		/* clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start);
		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the object is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */
	first_entry = NULL;
	*entry_list = NULL;		/* to be safe */

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could
	 * end up with a mapping to a page on the free list which would be
	 * very bad).
	 */

	while ((entry != &map->header) && (entry->start < end)) {

		UVM_MAP_CLIP_END(map, entry, end);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (VM_MAPENT_ISWIRED(entry))
			uvm_map_entry_unwire(map, entry);

		/*
		 * special case: handle mappings to anonymous kernel objects.
		 * we want to free these pages right away...
		 */
		if (UVM_ET_ISOBJ(entry) &&
		    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - drops the swap slot
			 *     - uvm_pagefree the page
			 *
			 * note there is a version of uvm_km_pgremove() that
			 * is used for "intrsafe" objects.
			 */

			/*
			 * remove mappings from pmap and drop the pages
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */
			if (UVM_OBJ_IS_INTRSAFE_OBJECT(entry->object.uvm_obj)) {
				pmap_kremove(entry->start, len);
				uvm_km_pgremove_intrsafe(entry->object.uvm_obj,
				    entry->start - vm_map_min(kernel_map),
				    entry->end - vm_map_min(kernel_map));
			} else {
				pmap_remove(pmap_kernel(), entry->start,
				    entry->start + len);
				uvm_km_pgremove(entry->object.uvm_obj,
				    entry->start - vm_map_min(kernel_map),
				    entry->end - vm_map_min(kernel_map));
			}

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */
			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;	/* to be safe */

		} else {
			/*
			 * remove mappings the standard way.
			 */
			pmap_remove(map->pmap, entry->start, entry->end);
		}

		/*
		 * remove entry from map and put it on our list of entries
		 * that we've nuked.  then go do next entry.
		 */
		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);

		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

		uvm_map_entry_unlink(map, entry);
		map->size -= len;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;		/* next entry, please */
	}

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
	return(KERN_SUCCESS);
}

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(first_entry, flags)
	vm_map_entry_t first_entry;
	int flags;
{
	vm_map_entry_t next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {
		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj,
		    UVM_ET_ISSUBMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			uvm_map_unreference_amap(first_entry, flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		if (UVM_ET_ISSUBMAP(first_entry)) {
			/* ... unlikely to happen, but play it safe */
			uvm_map_deallocate(first_entry->object.sub_map);
		} else {
			if (UVM_ET_ISOBJ(first_entry) &&
			    first_entry->object.uvm_obj->pgops->pgo_detach)
				first_entry->object.uvm_obj->pgops->
				    pgo_detach(first_entry->object.uvm_obj);
		}

		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

int
uvm_map_reserve(map, size, offset, align, raddr)
	vm_map_t map;
	vsize_t size;
	vaddr_t offset;		/* hint for pmap_prefer */
	vsize_t align;		/* alignment hint */
	vaddr_t *raddr;		/* IN:hint, OUT: reserved VA */
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
	    map,size,offset,raddr);

	size = round_page(size);
	if (*raddr < vm_map_min(map))
		*raddr = vm_map_min(map);	/* hint */

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return (FALSE);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return (TRUE);
}
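
/*
 * note: uvm_map_reserve() and uvm_map_replace() work as a pair (see
 * uvm_map_extract() below): reserve installs a blank VM_PROT_NONE entry
 * to hold a VA range in the target map, and replace later swaps that
 * blank entry for a chain of real entries built while the maps were
 * unlocked.
 */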

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return TRUE if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */

int
uvm_map_replace(map, start, end, newents, nnewents)
	struct vm_map *map;
	vaddr_t start, end;
	vm_map_entry_t newents;
	int nnewents;
{
	vm_map_entry_t oldent, last;

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return(FALSE);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return (FALSE);
	}

#ifdef DIAGNOSTIC
	/*
	 * sanity check the newents chain
	 */
	{
		vm_map_entry_t tmpent = newents;
		int nent = 0;
		vaddr_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {

		last = newents->prev;		/* we expect this */

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;
		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

	} else {

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, oldent->prev);
		if (map->first_free == oldent)
			map->first_free = oldent->prev;

		/* NULL list of new entries: just remove the old one */
		uvm_map_entry_unlink(map, oldent);
	}


	/*
	 * now we can free the old blank entry and return (the caller still
	 * holds the map lock).
	 */

	uvm_mapent_free(oldent);
	return(TRUE);
}

/*
 * uvm_map_extract: extract a mapping from a map and put it somewhere
 * (maybe removing the old mapping)
 *
 * => maps should be unlocked (we will write lock them)
 * => returns 0 on success, error code otherwise
 * => start must be page aligned
 * => len must be page sized
 * => flags:
 *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
 *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
 *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
 *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
 *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
 *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
 *             be used from within the kernel in a kernel level map <<<
 */

int
uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
	vm_map_t srcmap, dstmap;
	vaddr_t start, *dstaddrp;
	vsize_t len;
	int flags;
{
	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
	    oldstart;
	vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
	vm_map_entry_t oldentry;
	vsize_t elen;
	int nchain, error, copy_ok;
	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
	    len,0);
	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);

	/*
	 * step 0: sanity check: start must be on a page boundary, length
	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
	 * REMOVE.
	 */

	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
	    (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);

	/*
	 * step 1: reserve space in the target map for the extracted area
	 */

	dstaddr = vm_map_min(dstmap);
	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
		return(ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);

	/*
	 * step 2: setup for the extraction process loop by init'ing the
	 * map entry chain, locking src map, and looking up the first useful
	 * entry in the map.
	 */

	end = start + len;
	newend = dstaddr + len;
	chain = endchain = NULL;
	nchain = 0;
	vm_map_lock(srcmap);

	if (uvm_map_lookup_entry(srcmap, start, &entry)) {

		/* "start" is within an entry */
		if (flags & UVM_EXTRACT_QREF) {

			/*
			 * for quick references we don't clip the entry, so
			 * the entry may map space "before" the starting
			 * virtual address... this is the "fudge" factor
			 * (which can be non-zero only the first time
			 * through the "while" loop in step 3).
			 */

			fudge = start - entry->start;
		} else {

			/*
			 * normal reference: we clip the map to fit (thus
			 * fudge is zero)
			 */

			UVM_MAP_CLIP_START(srcmap, entry, start);
			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
			fudge = 0;
		}
	} else {

		/* "start" is not within an entry ... skip to next entry */
		if (flags & UVM_EXTRACT_CONTIG) {
			error = EINVAL;
			goto bad;	/* definite hole here ... */
		}

		entry = entry->next;
		fudge = 0;
	}

	/* save values from srcmap for step 6 */
	orig_entry = entry;
	orig_fudge = fudge;

	/*
	 * step 3: now start looping through the map entries, extracting
	 * as we go.
	 */

	while (entry->start < end && entry != &srcmap->header) {

		/* if we are not doing a quick reference, clip it */
		if ((flags & UVM_EXTRACT_QREF) == 0)
			UVM_MAP_CLIP_END(srcmap, entry, end);

		/* clear needs_copy (allow chunking) */
		if (UVM_ET_ISNEEDSCOPY(entry)) {
			if (fudge)
				oldstart = entry->start;
			else
				oldstart = 0;	/* XXX: gcc */
			amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
				error = ENOMEM;
				goto bad;
			}

			/* amap_copy could clip (during chunk)!  update fudge */
			if (fudge) {
				fudge = fudge - (entry->start - oldstart);
				orig_fudge = fudge;
			}
		}

		/* calculate the offset of this from "start" */
		oldoffset = (entry->start + fudge) - start;

		/* allocate a new map entry */
		newentry = uvm_mapent_alloc(dstmap);
		if (newentry == NULL) {
			error = ENOMEM;
			goto bad;
		}

		/* set up new map entry */
		newentry->next = NULL;
		newentry->prev = endchain;
		newentry->start = dstaddr + oldoffset;
		newentry->end =
		    newentry->start + (entry->end - (entry->start + fudge));
		if (newentry->end > newend || newentry->end < newentry->start)
			newentry->end = newend;
		newentry->object.uvm_obj = entry->object.uvm_obj;
		if (newentry->object.uvm_obj) {
			if (newentry->object.uvm_obj->pgops->pgo_reference)
				newentry->object.uvm_obj->pgops->
				    pgo_reference(newentry->object.uvm_obj);
			newentry->offset = entry->offset + fudge;
		} else {
			newentry->offset = 0;
		}
		newentry->etype = entry->etype;
		newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
		    entry->max_protection : entry->protection;
		newentry->max_protection = entry->max_protection;
		newentry->inheritance = entry->inheritance;
		newentry->wired_count = 0;
		newentry->aref.ar_amap = entry->aref.ar_amap;
		if (newentry->aref.ar_amap) {
			newentry->aref.ar_pageoff =
			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
			uvm_map_reference_amap(newentry, AMAP_SHARED |
			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
		} else {
			newentry->aref.ar_pageoff = 0;
		}
		newentry->advice = entry->advice;

		/* now link it on the chain */
		nchain++;
		if (endchain == NULL) {
			chain = endchain = newentry;
		} else {
			endchain->next = newentry;
			endchain = newentry;
		}

		/* end of 'while' loop! */
		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
		    (entry->next == &srcmap->header ||
		    entry->next->start != entry->end)) {
			error = EINVAL;
			goto bad;
		}
		entry = entry->next;
		fudge = 0;
	}

	/*
	 * step 4: close off chain (in format expected by uvm_map_replace)
	 */

	if (chain)
		chain->prev = endchain;

	/*
	 * step 5: attempt to lock the dest map so we can pmap_copy.
	 * note usage of copy_ok:
	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in
	 *        step 7
	 */

	if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
		copy_ok = 1;
		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain)) {
			if (srcmap != dstmap)
				vm_map_unlock(dstmap);
			error = EIO;
			goto bad;
		}
	} else {
		copy_ok = 0;
		/* replace deferred until step 7 */
	}

	/*
	 * step 6: traverse the srcmap a second time to do the following:
	 *  - if we got a lock on the dstmap do pmap_copy
	 *  - if UVM_EXTRACT_REMOVE remove the entries
	 * we make use of orig_entry and orig_fudge (saved in step 2)
	 */

	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {

		/* purge possible stale hints from srcmap */
		if (flags & UVM_EXTRACT_REMOVE) {
			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
			if (srcmap->first_free->start >= start)
				srcmap->first_free = orig_entry->prev;
		}

		entry = orig_entry;
		fudge = orig_fudge;
		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */

		while (entry->start < end && entry != &srcmap->header) {
			if (copy_ok) {
				oldoffset = (entry->start + fudge) - start;
				elen = MIN(end, entry->end) -
				    (entry->start + fudge);
				pmap_copy(dstmap->pmap, srcmap->pmap,
				    dstaddr + oldoffset, elen,
				    entry->start + fudge);
			}

			/* we advance "entry" in the following if statement */
			if (flags & UVM_EXTRACT_REMOVE) {
				pmap_remove(srcmap->pmap, entry->start,
				    entry->end);
				oldentry = entry;	/* save entry */
				entry = entry->next;	/* advance */
				uvm_map_entry_unlink(srcmap, oldentry);
				/* add to dead list */
				oldentry->next = deadentry;
				deadentry = oldentry;
			} else {
				entry = entry->next;	/* advance */
			}

			/* end of 'while' loop */
			fudge = 0;
		}

		/*
		 * unlock dstmap.  we will dispose of deadentry in
		 * step 7 if needed
		 */

		if (copy_ok && srcmap != dstmap)
			vm_map_unlock(dstmap);

	} else {
		deadentry = NULL;	/* XXX: gcc */
	}

	/*
	 * step 7: we are done with the source map, unlock.  if copy_ok
	 * is 0 then we have not replaced the dummy mapping in dstmap yet
	 * and we need to do so now.
	 */

	vm_map_unlock(srcmap);
	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
		uvm_unmap_detach(deadentry, 0);	  /* dispose of old entries */

	/* now do the replacement if we didn't do it in step 5 */
	if (copy_ok == 0) {
		vm_map_lock(dstmap);
		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
		    nchain);
		vm_map_unlock(dstmap);

		if (error == FALSE) {
			error = EIO;
			goto bad2;
		}
	}
	return(0);

	/*
	 * bad: failure recovery
	 */
bad:
	vm_map_unlock(srcmap);
bad2:			/* src already unlocked */
	if (chain)
		uvm_unmap_detach(chain,
		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
	uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
	return(error);
}

/* end of extraction functions */

/*
 * uvm_map_submap: punch down part of a map into a submap
 *
 * => only the kernel_map is allowed to be submapped
 * => the purpose of submapping is to break up the locking granularity
 *	of a larger map
 * => the range specified must have been mapped previously with a uvm_map()
 *	call [with uobj==NULL] to create a blank map entry in the main map.
 *	[And it had better still be blank!]
 * => maps which contain submaps should never be copied or forked.
 * => to remove a submap, use uvm_unmap() on the main map
 *	and then uvm_map_deallocate() the submap.
 * => main map must be unlocked.
 * => submap must have been init'd and have a zero reference count.
 *	[need not be locked as we don't actually reference it]
 */

int
uvm_map_submap(map, start, end, submap)
	vm_map_t map, submap;
	vaddr_t start, end;
{
	vm_map_entry_t entry;
	int result;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
	} else {
		entry = NULL;
	}

	if (entry != NULL &&
	    entry->start == start && entry->end == end &&
	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
		entry->etype |= UVM_ET_SUBMAP;
		entry->object.sub_map = submap;
		entry->offset = 0;
		uvm_map_reference(submap);
		result = KERN_SUCCESS;
	} else {
		result = KERN_INVALID_ARGUMENT;
	}
	vm_map_unlock(map);
	return(result);
}


/*
 * uvm_map_protect: change map protection
 *
 * => set_max means set max_protection.
 * => map must be unlocked.
 */

#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)
#define max(a,b)	((a) > (b) ? (a) : (b))

int
uvm_map_protect(map, start, end, new_prot, set_max)
	vm_map_t map;
	vaddr_t start, end;
	vm_prot_t new_prot;
	boolean_t set_max;
{
	vm_map_entry_t current, entry;
	int rv = KERN_SUCCESS;
	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
	    map, start, end, new_prot);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (UVM_ET_ISSUBMAP(current)) {
			rv = KERN_INVALID_ARGUMENT;
			goto out;
		}
		if ((new_prot & current->max_protection) != new_prot) {
			rv = KERN_PROTECTION_FAILURE;
			goto out;
		}
		current = current->next;
	}
	/* go back and fix up protections (no need to clip this time). */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		UVM_MAP_CLIP_END(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) & old_prot;
		else
			current->protection = new_prot;

		/*
		 * update physical map if necessary.  worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
			/* update pmap! */
			pmap_protect(map->pmap, current->start, current->end,
			    current->protection & MASK(entry));
		}

		/*
		 * If the map is configured to lock any future mappings,
		 * wire this entry now if the old protection was VM_PROT_NONE
		 * and the new protection is not VM_PROT_NONE.
		 */

		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
		    VM_MAPENT_ISWIRED(entry) == 0 &&
		    old_prot == VM_PROT_NONE &&
		    new_prot != VM_PROT_NONE) {
			if (uvm_map_pageable(map, entry->start,
			    entry->end, FALSE,
			    UVM_LK_ENTER|UVM_LK_EXIT) != KERN_SUCCESS) {
				/*
				 * If locking the entry fails, remember the
				 * error if it's the first one.  Note we
				 * still continue setting the protection in
				 * the map, but will return the resource
				 * shortage condition regardless.
				 *
				 * XXX Ignore what the actual error is,
				 * XXX just call it a resource shortage
				 * XXX so that it doesn't get confused
				 * XXX what uvm_map_protect() itself would
				 * XXX normally return.
				 */
				rv = KERN_RESOURCE_SHORTAGE;
			}
		}

		current = current->next;
	}

out:
	vm_map_unlock(map);
	UVMHIST_LOG(maphist, "<- done, rv=%d",rv,0,0,0);
	return (rv);
}

#undef max
#undef MASK

/*
 * uvm_map_inherit: set inheritance code for range of addrs in map.
 *
 * => map must be unlocked
 * => note that the inherit code is used during a "fork".  see fork
 *	code for details.
 */

int
uvm_map_inherit(map, start, end, new_inheritance)
	vm_map_t map;
	vaddr_t start;
	vaddr_t end;
	vm_inherit_t new_inheritance;
{
	vm_map_entry_t entry, temp_entry;
	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
	    map, start, end, new_inheritance);

	switch (new_inheritance) {
	case MAP_INHERIT_NONE:
	case MAP_INHERIT_COPY:
	case MAP_INHERIT_SHARE:
		break;
	default:
		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
		return (KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);
		entry->inheritance = new_inheritance;
		entry = entry->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
	return(KERN_SUCCESS);
}

/*
 * uvm_map_advice: set advice code for range of addrs in map.
 *
 * => map must be unlocked
 */

int
uvm_map_advice(map, start, end, new_advice)
	vm_map_t map;
	vaddr_t start;
	vaddr_t end;
	int new_advice;
{
	vm_map_entry_t entry, temp_entry;
	UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
	    map, start, end, new_advice);

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		entry = temp_entry->next;
	}

	/*
	 * XXXJRT: disallow holes?
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		UVM_MAP_CLIP_END(map, entry, end);

		switch (new_advice) {
		case MADV_NORMAL:
		case MADV_RANDOM:
		case MADV_SEQUENTIAL:
			/* nothing special here */
			break;

		default:
			vm_map_unlock(map);
			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
			return (KERN_INVALID_ARGUMENT);
		}
		entry->advice = new_advice;
		entry = entry->next;
	}

	vm_map_unlock(map);
	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
	return (KERN_SUCCESS);
}

/*
 * uvm_map_pageable: sets the pageability of a range in a map.
 *
 * => wires map entries.  should not be used for transient page locking.
 *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
 * => regions specified as not pageable require lock-down (wired) memory
 *	and page tables.
 * => map must never be read-locked
 * => if UVM_LK_ENTER is set in lockflags, map is already write-locked
 * => we always unlock the map, since we must downgrade to a read-lock
 *	to call uvm_fault_wire()
 * => XXXCDC: check this and try and clean it up.
 */

int
uvm_map_pageable(map, start, end, new_pageable, lockflags)
	vm_map_t map;
	vaddr_t start, end;
	boolean_t new_pageable;
	int lockflags;
{
	vm_map_entry_t entry, start_entry, failed_entry;
	int rv;
#ifdef DIAGNOSTIC
	u_int timestamp_save;
#endif
	UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
	    map, start, end, new_pageable);
	KASSERT(map->flags & VM_MAP_PAGEABLE);

	if ((lockflags & UVM_LK_ENTER) == 0)
		vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * only one pageability change may take place at one time, since
	 * uvm_fault_wire assumes it will be called only once for each
	 * wiring/unwiring.  therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  we do so before
	 * making any changes.
	 */

	if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		if ((lockflags & UVM_LK_EXIT) == 0)
			vm_map_unlock(map);

		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * handle wiring and unwiring separately.
	 */

	if (new_pageable) {		/* unwire */
		UVM_MAP_CLIP_START(map, entry, start);

		/*
		 * unwiring.  first ensure that the range to be unwired is
		 * really wired down and that there are no holes.
2008 */ 2009 2010 while ((entry != &map->header) && (entry->start < end)) { 2011 if (entry->wired_count == 0 || 2012 (entry->end < end && 2013 (entry->next == &map->header || 2014 entry->next->start > entry->end))) { 2015 if ((lockflags & UVM_LK_EXIT) == 0) 2016 vm_map_unlock(map); 2017 UVMHIST_LOG(maphist, 2018 "<- done (INVALID UNWIRE ARG)",0,0,0,0); 2019 return (KERN_INVALID_ARGUMENT); 2020 } 2021 entry = entry->next; 2022 } 2023 2024 /* 2025 * POSIX 1003.1b - a single munlock call unlocks a region, 2026 * regardless of the number of mlock calls made on that 2027 * region. 2028 */ 2029 2030 entry = start_entry; 2031 while ((entry != &map->header) && (entry->start < end)) { 2032 UVM_MAP_CLIP_END(map, entry, end); 2033 if (VM_MAPENT_ISWIRED(entry)) 2034 uvm_map_entry_unwire(map, entry); 2035 entry = entry->next; 2036 } 2037 if ((lockflags & UVM_LK_EXIT) == 0) 2038 vm_map_unlock(map); 2039 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 2040 return(KERN_SUCCESS); 2041 } 2042 2043 /* 2044 * wire case: in two passes [XXXCDC: ugly block of code here] 2045 * 2046 * 1: holding the write lock, we create any anonymous maps that need 2047 * to be created. then we clip each map entry to the region to 2048 * be wired and increment its wiring count. 2049 * 2050 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault 2051 * in the pages for any newly wired area (wired_count == 1). 2052 * 2053 * downgrading to a read lock for uvm_fault_wire avoids a possible 2054 * deadlock with another thread that may have faulted on one of 2055 * the pages to be wired (it would mark the page busy, blocking 2056 * us, then in turn block on the map lock that we hold). because 2057 * of problems in the recursive lock package, we cannot upgrade 2058 * to a write lock in vm_map_lookup. thus, any actions that 2059 * require the write lock must be done beforehand. because we 2060 * keep the read lock on the map, the copy-on-write status of the 2061 * entries we modify here cannot change. 2062 */ 2063 2064 while ((entry != &map->header) && (entry->start < end)) { 2065 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 2066 2067 /* 2068 * perform actions of vm_map_lookup that need the 2069 * write lock on the map: create an anonymous map 2070 * for a copy-on-write region, or an anonymous map 2071 * for a zero-fill region. (XXXCDC: submap case 2072 * ok?) 2073 */ 2074 2075 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 2076 if (UVM_ET_ISNEEDSCOPY(entry) && 2077 ((entry->protection & VM_PROT_WRITE) || 2078 (entry->object.uvm_obj == NULL))) { 2079 amap_copy(map, entry, M_WAITOK, TRUE, 2080 start, end); 2081 /* XXXCDC: wait OK? */ 2082 } 2083 } 2084 } 2085 UVM_MAP_CLIP_START(map, entry, start); 2086 UVM_MAP_CLIP_END(map, entry, end); 2087 entry->wired_count++; 2088 2089 /* 2090 * Check for holes 2091 */ 2092 2093 if (entry->protection == VM_PROT_NONE || 2094 (entry->end < end && 2095 (entry->next == &map->header || 2096 entry->next->start > entry->end))) { 2097 2098 /* 2099 * found one. amap creation actions do not need to 2100 * be undone, but the wired counts need to be restored. 2101 */ 2102 2103 while (entry != &map->header && entry->end > start) { 2104 entry->wired_count--; 2105 entry = entry->prev; 2106 } 2107 if ((lockflags & UVM_LK_EXIT) == 0) 2108 vm_map_unlock(map); 2109 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0); 2110 return (KERN_INVALID_ARGUMENT); 2111 } 2112 entry = entry->next; 2113 } 2114 2115 /* 2116 * Pass 2. 
2117 */ 2118 2119 #ifdef DIAGNOSTIC 2120 timestamp_save = map->timestamp; 2121 #endif 2122 vm_map_busy(map); 2123 vm_map_downgrade(map); 2124 2125 rv = 0; 2126 entry = start_entry; 2127 while (entry != &map->header && entry->start < end) { 2128 if (entry->wired_count == 1) { 2129 rv = uvm_fault_wire(map, entry->start, entry->end, 2130 entry->protection); 2131 if (rv) { 2132 /* 2133 * wiring failed. break out of the loop. 2134 * we'll clean up the map below, once we 2135 * have a write lock again. 2136 */ 2137 break; 2138 } 2139 } 2140 entry = entry->next; 2141 } 2142 2143 if (rv) { /* failed? */ 2144 2145 /* 2146 * Get back to an exclusive (write) lock. 2147 */ 2148 2149 vm_map_upgrade(map); 2150 vm_map_unbusy(map); 2151 2152 #ifdef DIAGNOSTIC 2153 if (timestamp_save != map->timestamp) 2154 panic("uvm_map_pageable: stale map"); 2155 #endif 2156 2157 /* 2158 * first drop the wiring count on all the entries 2159 * which haven't actually been wired yet. 2160 */ 2161 2162 failed_entry = entry; 2163 while (entry != &map->header && entry->start < end) { 2164 entry->wired_count--; 2165 entry = entry->next; 2166 } 2167 2168 /* 2169 * now, unwire all the entries that were successfully 2170 * wired above. 2171 */ 2172 2173 entry = start_entry; 2174 while (entry != failed_entry) { 2175 entry->wired_count--; 2176 if (VM_MAPENT_ISWIRED(entry) == 0) 2177 uvm_map_entry_unwire(map, entry); 2178 entry = entry->next; 2179 } 2180 if ((lockflags & UVM_LK_EXIT) == 0) 2181 vm_map_unlock(map); 2182 UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0); 2183 return(rv); 2184 } 2185 2186 /* We are holding a read lock here. */ 2187 if ((lockflags & UVM_LK_EXIT) == 0) { 2188 vm_map_unbusy(map); 2189 vm_map_unlock_read(map); 2190 } else { 2191 2192 /* 2193 * Get back to an exclusive (write) lock. 2194 */ 2195 2196 vm_map_upgrade(map); 2197 vm_map_unbusy(map); 2198 } 2199 2200 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); 2201 return(KERN_SUCCESS); 2202 } 2203 2204 /* 2205 * uvm_map_pageable_all: special case of uvm_map_pageable - affects 2206 * all mapped regions. 2207 * 2208 * => map must not be locked. 2209 * => if no flags are specified, all regions are unwired. 2210 * => XXXJRT: has some of the same problems as uvm_map_pageable() above. 2211 */ 2212 2213 int 2214 uvm_map_pageable_all(map, flags, limit) 2215 vm_map_t map; 2216 int flags; 2217 vsize_t limit; 2218 { 2219 vm_map_entry_t entry, failed_entry; 2220 vsize_t size; 2221 int rv; 2222 #ifdef DIAGNOSTIC 2223 u_int timestamp_save; 2224 #endif 2225 UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist); 2226 UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0); 2227 2228 KASSERT(map->flags & VM_MAP_PAGEABLE); 2229 2230 vm_map_lock(map); 2231 2232 /* 2233 * handle wiring and unwiring separately. 2234 */ 2235 2236 if (flags == 0) { /* unwire */ 2237 /* 2238 * POSIX 1003.1b -- munlockall unlocks all regions, 2239 * regardless of how many times mlockall has been called. 2240 */ 2241 for (entry = map->header.next; entry != &map->header; 2242 entry = entry->next) { 2243 if (VM_MAPENT_ISWIRED(entry)) 2244 uvm_map_entry_unwire(map, entry); 2245 } 2246 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); 2247 vm_map_unlock(map); 2248 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 2249 return (KERN_SUCCESS); 2250 2251 /* 2252 * end of unwire case! 2253 */ 2254 } 2255 2256 if (flags & MCL_FUTURE) { 2257 /* 2258 * must wire all future mappings; remember this. 
2259 */ 2260 vm_map_modflags(map, VM_MAP_WIREFUTURE, 0); 2261 } 2262 2263 if ((flags & MCL_CURRENT) == 0) { 2264 /* 2265 * no more work to do! 2266 */ 2267 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0); 2268 vm_map_unlock(map); 2269 return (KERN_SUCCESS); 2270 } 2271 2272 /* 2273 * wire case: in three passes [XXXCDC: ugly block of code here] 2274 * 2275 * 1: holding the write lock, count all pages mapped by non-wired 2276 * entries. if this would cause us to go over our limit, we fail. 2277 * 2278 * 2: still holding the write lock, we create any anonymous maps that 2279 * need to be created. then we increment its wiring count. 2280 * 2281 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault 2282 * in the pages for any newly wired area (wired_count == 1). 2283 * 2284 * downgrading to a read lock for uvm_fault_wire avoids a possible 2285 * deadlock with another thread that may have faulted on one of 2286 * the pages to be wired (it would mark the page busy, blocking 2287 * us, then in turn block on the map lock that we hold). because 2288 * of problems in the recursive lock package, we cannot upgrade 2289 * to a write lock in vm_map_lookup. thus, any actions that 2290 * require the write lock must be done beforehand. because we 2291 * keep the read lock on the map, the copy-on-write status of the 2292 * entries we modify here cannot change. 2293 */ 2294 2295 for (size = 0, entry = map->header.next; entry != &map->header; 2296 entry = entry->next) { 2297 if (entry->protection != VM_PROT_NONE && 2298 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 2299 size += entry->end - entry->start; 2300 } 2301 } 2302 2303 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) { 2304 vm_map_unlock(map); 2305 return (KERN_NO_SPACE); /* XXX overloaded */ 2306 } 2307 2308 /* XXX non-pmap_wired_count case must be handled by caller */ 2309 #ifdef pmap_wired_count 2310 if (limit != 0 && 2311 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) { 2312 vm_map_unlock(map); 2313 return (KERN_NO_SPACE); /* XXX overloaded */ 2314 } 2315 #endif 2316 2317 /* 2318 * Pass 2. 2319 */ 2320 2321 for (entry = map->header.next; entry != &map->header; 2322 entry = entry->next) { 2323 if (entry->protection == VM_PROT_NONE) 2324 continue; 2325 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 2326 /* 2327 * perform actions of vm_map_lookup that need the 2328 * write lock on the map: create an anonymous map 2329 * for a copy-on-write region, or an anonymous map 2330 * for a zero-fill region. (XXXCDC: submap case 2331 * ok?) 2332 */ 2333 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 2334 if (UVM_ET_ISNEEDSCOPY(entry) && 2335 ((entry->protection & VM_PROT_WRITE) || 2336 (entry->object.uvm_obj == NULL))) { 2337 amap_copy(map, entry, M_WAITOK, TRUE, 2338 entry->start, entry->end); 2339 /* XXXCDC: wait OK? */ 2340 } 2341 } 2342 } 2343 entry->wired_count++; 2344 } 2345 2346 /* 2347 * Pass 3. 2348 */ 2349 2350 #ifdef DIAGNOSTIC 2351 timestamp_save = map->timestamp; 2352 #endif 2353 vm_map_busy(map); 2354 vm_map_downgrade(map); 2355 2356 rv = KERN_SUCCESS; 2357 for (entry = map->header.next; entry != &map->header; 2358 entry = entry->next) { 2359 if (entry->wired_count == 1) { 2360 rv = uvm_fault_wire(map, entry->start, entry->end, 2361 entry->protection); 2362 if (rv) { 2363 /* 2364 * wiring failed. break out of the loop. 2365 * we'll clean up the map below, once we 2366 * have a write lock again. 2367 */ 2368 break; 2369 } 2370 } 2371 } 2372 2373 if (rv) { /* failed? 

	if (rv) {	/* failed? */

		/*
		 * Get back to an exclusive (write) lock.
		 */

		vm_map_upgrade(map);
		vm_map_unbusy(map);

#ifdef DIAGNOSTIC
		if (timestamp_save != map->timestamp)
			panic("uvm_map_pageable_all: stale map");
#endif

		/*
		 * first drop the wiring count on all the entries
		 * which haven't actually been wired yet.
		 *
		 * Skip VM_PROT_NONE entries like we did above.
		 */
		failed_entry = entry;
		for (/* nothing */; entry != &map->header;
		     entry = entry->next) {
			if (entry->protection == VM_PROT_NONE)
				continue;
			entry->wired_count--;
		}

		/*
		 * now, unwire all the entries that were successfully
		 * wired above.
		 *
		 * Skip VM_PROT_NONE entries like we did above.
		 */
		for (entry = map->header.next; entry != failed_entry;
		     entry = entry->next) {
			if (entry->protection == VM_PROT_NONE)
				continue;
			entry->wired_count--;
			if (VM_MAPENT_ISWIRED(entry))
				uvm_map_entry_unwire(map, entry);
		}
		vm_map_unlock(map);
		UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
		return (rv);
	}

	/* We are holding a read lock here. */
	vm_map_unbusy(map);
	vm_map_unlock_read(map);

	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
	return (KERN_SUCCESS);
}
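
/*
 * Illustrative sketch (assumption, not this file's API): mlockall(2)
 * would pass MCL_CURRENT and/or MCL_FUTURE plus the caller's
 * RLIMIT_MEMLOCK limit, and munlockall(2) would pass flags == 0.
 */
#if 0	/* example only, not compiled */
static int
example_mlockall(struct proc *p, int flags)
{
	/* wire everything now and/or remember to wire future mappings */
	return (uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) == KERN_SUCCESS ?
	    0 : ENOMEM);
}
#endif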
2474 */ 2475 2476 for (current = entry; current->start < end; current = current->next) { 2477 if (UVM_ET_ISSUBMAP(current)) { 2478 vm_map_unlock_read(map); 2479 return (KERN_INVALID_ARGUMENT); 2480 } 2481 if (end > current->end && (current->next == &map->header || 2482 current->end != current->next->start)) { 2483 vm_map_unlock_read(map); 2484 return (KERN_INVALID_ADDRESS); 2485 } 2486 } 2487 2488 error = KERN_SUCCESS; 2489 2490 for (current = entry; current->start < end; current = current->next) { 2491 amap = current->aref.ar_amap; /* top layer */ 2492 uobj = current->object.uvm_obj; /* bottom layer */ 2493 KASSERT(start >= current->start); 2494 2495 /* 2496 * No amap cleaning necessary if: 2497 * 2498 * (1) There's no amap. 2499 * 2500 * (2) We're not deactivating or freeing pages. 2501 */ 2502 2503 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) 2504 goto flush_object; 2505 2506 /* XXX for now, just in case... */ 2507 if (amap_clean_works == 0) 2508 goto flush_object; 2509 2510 amap_lock(amap); 2511 offset = start - current->start; 2512 size = MIN(end, current->end) - start; 2513 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) { 2514 anon = amap_lookup(¤t->aref, offset); 2515 if (anon == NULL) 2516 continue; 2517 2518 simple_lock(&anon->an_lock); 2519 2520 pg = anon->u.an_page; 2521 if (pg == NULL) { 2522 simple_unlock(&anon->an_lock); 2523 continue; 2524 } 2525 2526 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) { 2527 2528 /* 2529 * XXX In these first 3 cases, we always just 2530 * XXX deactivate the page. We may want to 2531 * XXX handle the different cases more 2532 * XXX specifically, in the future. 2533 */ 2534 2535 case PGO_CLEANIT|PGO_FREE: 2536 case PGO_CLEANIT|PGO_DEACTIVATE: 2537 case PGO_DEACTIVATE: 2538 deactivate_it: 2539 /* skip the page if it's loaned or wired */ 2540 if (pg->loan_count != 0 || 2541 pg->wire_count != 0) { 2542 simple_unlock(&anon->an_lock); 2543 continue; 2544 } 2545 2546 uvm_lock_pageq(); 2547 2548 /* 2549 * skip the page if it's not actually owned 2550 * by the anon (may simply be loaned to the 2551 * anon). 2552 */ 2553 2554 if ((pg->pqflags & PQ_ANON) == 0) { 2555 KASSERT(pg->uobject == NULL); 2556 uvm_unlock_pageq(); 2557 simple_unlock(&anon->an_lock); 2558 continue; 2559 } 2560 KASSERT(pg->uanon == anon); 2561 2562 #ifdef UBC 2563 /* ...and deactivate the page. */ 2564 pmap_clear_reference(pg); 2565 #else 2566 /* zap all mappings for the page. */ 2567 pmap_page_protect(pg, VM_PROT_NONE); 2568 2569 /* ...and deactivate the page. */ 2570 #endif 2571 uvm_pagedeactivate(pg); 2572 2573 uvm_unlock_pageq(); 2574 simple_unlock(&anon->an_lock); 2575 continue; 2576 2577 case PGO_FREE: 2578 2579 /* 2580 * If there are multiple references to 2581 * the amap, just deactivate the page. 2582 */ 2583 2584 if (amap_refs(amap) > 1) 2585 goto deactivate_it; 2586 2587 /* XXX skip the page if it's wired */ 2588 if (pg->wire_count != 0) { 2589 simple_unlock(&anon->an_lock); 2590 continue; 2591 } 2592 amap_unadd(¤t->aref, offset); 2593 refs = --anon->an_ref; 2594 simple_unlock(&anon->an_lock); 2595 if (refs == 0) 2596 uvm_anfree(anon); 2597 continue; 2598 2599 default: 2600 panic("uvm_map_clean: wierd flags"); 2601 } 2602 } 2603 amap_unlock(amap); 2604 2605 flush_object: 2606 /* 2607 * flush pages if we've got a valid backing object. 
2608 */ 2609 2610 offset = current->offset + (start - current->start); 2611 size = MIN(end, current->end) - start; 2612 if (uobj != NULL) { 2613 simple_lock(&uobj->vmobjlock); 2614 rv = uobj->pgops->pgo_flush(uobj, offset, 2615 offset + size, flags); 2616 simple_unlock(&uobj->vmobjlock); 2617 2618 if (rv == FALSE) 2619 error = KERN_FAILURE; 2620 } 2621 start += size; 2622 } 2623 vm_map_unlock_read(map); 2624 return (error); 2625 } 2626 2627 2628 /* 2629 * uvm_map_checkprot: check protection in map 2630 * 2631 * => must allow specified protection in a fully allocated region. 2632 * => map must be read or write locked by caller. 2633 */ 2634 2635 boolean_t 2636 uvm_map_checkprot(map, start, end, protection) 2637 vm_map_t map; 2638 vaddr_t start, end; 2639 vm_prot_t protection; 2640 { 2641 vm_map_entry_t entry; 2642 vm_map_entry_t tmp_entry; 2643 2644 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { 2645 return(FALSE); 2646 } 2647 entry = tmp_entry; 2648 while (start < end) { 2649 if (entry == &map->header) { 2650 return(FALSE); 2651 } 2652 2653 /* 2654 * no holes allowed 2655 */ 2656 2657 if (start < entry->start) { 2658 return(FALSE); 2659 } 2660 2661 /* 2662 * check protection associated with entry 2663 */ 2664 2665 if ((entry->protection & protection) != protection) { 2666 return(FALSE); 2667 } 2668 2669 /* go to next entry */ 2670 2671 start = entry->end; 2672 entry = entry->next; 2673 } 2674 return(TRUE); 2675 } 2676 2677 /* 2678 * uvmspace_alloc: allocate a vmspace structure. 2679 * 2680 * - structure includes vm_map and pmap 2681 * - XXX: no locking on this structure 2682 * - refcnt set to 1, rest must be init'd by caller 2683 */ 2684 struct vmspace * 2685 uvmspace_alloc(min, max, pageable) 2686 vaddr_t min, max; 2687 int pageable; 2688 { 2689 struct vmspace *vm; 2690 UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist); 2691 2692 vm = pool_get(&uvm_vmspace_pool, PR_WAITOK); 2693 uvmspace_init(vm, NULL, min, max, pageable); 2694 UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0); 2695 return (vm); 2696 } 2697 2698 /* 2699 * uvmspace_init: initialize a vmspace structure. 2700 * 2701 * - XXX: no locking on this structure 2702 * - refcnt set to 1, rest must me init'd by caller 2703 */ 2704 void 2705 uvmspace_init(vm, pmap, min, max, pageable) 2706 struct vmspace *vm; 2707 struct pmap *pmap; 2708 vaddr_t min, max; 2709 boolean_t pageable; 2710 { 2711 UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); 2712 2713 memset(vm, 0, sizeof(*vm)); 2714 2715 uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0); 2716 2717 if (pmap) 2718 pmap_reference(pmap); 2719 else 2720 pmap = pmap_create(); 2721 vm->vm_map.pmap = pmap; 2722 2723 vm->vm_refcnt = 1; 2724 UVMHIST_LOG(maphist,"<- done",0,0,0,0); 2725 } 2726 2727 /* 2728 * uvmspace_share: share a vmspace between two proceses 2729 * 2730 * - XXX: no locking on vmspace 2731 * - used for vfork, threads(?) 
2732 */ 2733 2734 void 2735 uvmspace_share(p1, p2) 2736 struct proc *p1, *p2; 2737 { 2738 p2->p_vmspace = p1->p_vmspace; 2739 p1->p_vmspace->vm_refcnt++; 2740 } 2741 2742 /* 2743 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace 2744 * 2745 * - XXX: no locking on vmspace 2746 */ 2747 2748 void 2749 uvmspace_unshare(p) 2750 struct proc *p; 2751 { 2752 struct vmspace *nvm, *ovm = p->p_vmspace; 2753 2754 if (ovm->vm_refcnt == 1) 2755 /* nothing to do: vmspace isn't shared in the first place */ 2756 return; 2757 2758 /* make a new vmspace, still holding old one */ 2759 nvm = uvmspace_fork(ovm); 2760 2761 pmap_deactivate(p); /* unbind old vmspace */ 2762 p->p_vmspace = nvm; 2763 pmap_activate(p); /* switch to new vmspace */ 2764 2765 uvmspace_free(ovm); /* drop reference to old vmspace */ 2766 } 2767 2768 /* 2769 * uvmspace_exec: the process wants to exec a new program 2770 * 2771 * - XXX: no locking on vmspace 2772 */ 2773 2774 void 2775 uvmspace_exec(p, start, end) 2776 struct proc *p; 2777 vaddr_t start, end; 2778 { 2779 struct vmspace *nvm, *ovm = p->p_vmspace; 2780 vm_map_t map = &ovm->vm_map; 2781 2782 #ifdef __sparc__ 2783 /* XXX cgd 960926: the sparc #ifdef should be a MD hook */ 2784 kill_user_windows(p); /* before stack addresses go away */ 2785 #endif 2786 2787 /* 2788 * see if more than one process is using this vmspace... 2789 */ 2790 2791 if (ovm->vm_refcnt == 1) { 2792 2793 /* 2794 * if p is the only process using its vmspace then we can safely 2795 * recycle that vmspace for the program that is being exec'd. 2796 */ 2797 2798 #ifdef SYSVSHM 2799 /* 2800 * SYSV SHM semantics require us to kill all segments on an exec 2801 */ 2802 if (ovm->vm_shm) 2803 shmexit(ovm); 2804 #endif 2805 2806 /* 2807 * POSIX 1003.1b -- "lock future mappings" is revoked 2808 * when a process execs another program image. 2809 */ 2810 vm_map_lock(map); 2811 vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); 2812 vm_map_unlock(map); 2813 2814 /* 2815 * now unmap the old program 2816 */ 2817 uvm_unmap(map, map->min_offset, map->max_offset); 2818 2819 /* 2820 * resize the map 2821 */ 2822 vm_map_lock(map); 2823 map->min_offset = start; 2824 map->max_offset = end; 2825 vm_map_unlock(map); 2826 2827 2828 } else { 2829 2830 /* 2831 * p's vmspace is being shared, so we can't reuse it for p since 2832 * it is still being used for others. allocate a new vmspace 2833 * for p 2834 */ 2835 nvm = uvmspace_alloc(start, end, 2836 (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE); 2837 2838 /* 2839 * install new vmspace and drop our ref to the old one. 2840 */ 2841 2842 pmap_deactivate(p); 2843 p->p_vmspace = nvm; 2844 pmap_activate(p); 2845 2846 uvmspace_free(ovm); 2847 } 2848 } 2849 2850 /* 2851 * uvmspace_free: free a vmspace data structure 2852 * 2853 * - XXX: no locking on vmspace 2854 */ 2855 2856 void 2857 uvmspace_free(vm) 2858 struct vmspace *vm; 2859 { 2860 vm_map_entry_t dead_entries; 2861 UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); 2862 2863 UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); 2864 if (--vm->vm_refcnt == 0) { 2865 /* 2866 * lock the map, to wait out all other references to it. delete 2867 * all of the mappings and pages they hold, then call the pmap 2868 * module to reclaim anything left. 2869 */ 2870 #ifdef SYSVSHM 2871 /* Get rid of any SYSV shared memory segments. 

/*
 * uvmspace_free: free a vmspace data structure
 *
 * - XXX: no locking on vmspace
 */

void
uvmspace_free(vm)
	struct vmspace *vm;
{
	vm_map_entry_t dead_entries;
	UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
	if (--vm->vm_refcnt == 0) {
		/*
		 * lock the map, to wait out all other references to it.  delete
		 * all of the mappings and pages they hold, then call the pmap
		 * module to reclaim anything left.
		 */
#ifdef SYSVSHM
		/* Get rid of any SYSV shared memory segments. */
		if (vm->vm_shm != NULL)
			shmexit(vm);
#endif
		vm_map_lock(&vm->vm_map);
		if (vm->vm_map.nentries) {
			(void)uvm_unmap_remove(&vm->vm_map,
			    vm->vm_map.min_offset, vm->vm_map.max_offset,
			    &dead_entries);
			if (dead_entries != NULL)
				uvm_unmap_detach(dead_entries, 0);
		}
		pmap_destroy(vm->vm_map.pmap);
		vm->vm_map.pmap = NULL;
		pool_put(&uvm_vmspace_pool, vm);
	}
	UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
}
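
/*
 * Illustrative sketch (assumption, not this file's API): the process
 * exit path simply drops its reference; the vmspace is torn down
 * only when the last sharer calls uvmspace_free().
 */
#if 0	/* example only, not compiled */
static void
example_exit_vm(struct proc *p)
{
	/* drop our reference; frees the vmspace if we were the last user */
	uvmspace_free(p->p_vmspace);
	p->p_vmspace = NULL;
}
#endif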
2997 */ 2998 2999 pmap_copy(new_pmap, old_map->pmap, new_entry->start, 3000 (old_entry->end - old_entry->start), 3001 old_entry->start); 3002 3003 break; 3004 3005 case MAP_INHERIT_COPY: 3006 3007 /* 3008 * copy-on-write the mapping (using mmap's 3009 * MAP_PRIVATE semantics) 3010 * 3011 * allocate new_entry, adjust reference counts. 3012 * (note that new references are read-only). 3013 */ 3014 3015 new_entry = uvm_mapent_alloc(new_map); 3016 /* old_entry -> new_entry */ 3017 uvm_mapent_copy(old_entry, new_entry); 3018 3019 if (new_entry->aref.ar_amap) 3020 uvm_map_reference_amap(new_entry, 0); 3021 3022 if (new_entry->object.uvm_obj && 3023 new_entry->object.uvm_obj->pgops->pgo_reference) 3024 new_entry->object.uvm_obj->pgops->pgo_reference 3025 (new_entry->object.uvm_obj); 3026 3027 /* new pmap has nothing wired in it */ 3028 new_entry->wired_count = 0; 3029 3030 new_entry->etype |= 3031 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY); 3032 uvm_map_entry_link(new_map, new_map->header.prev, 3033 new_entry); 3034 3035 /* 3036 * the new entry will need an amap. it will either 3037 * need to be copied from the old entry or created 3038 * from scratch (if the old entry does not have an 3039 * amap). can we defer this process until later 3040 * (by setting "needs_copy") or do we need to copy 3041 * the amap now? 3042 * 3043 * we must copy the amap now if any of the following 3044 * conditions hold: 3045 * 1. the old entry has an amap and that amap is 3046 * being shared. this means that the old (parent) 3047 * process is sharing the amap with another 3048 * process. if we do not clear needs_copy here 3049 * we will end up in a situation where both the 3050 * parent and child process are refering to the 3051 * same amap with "needs_copy" set. if the 3052 * parent write-faults, the fault routine will 3053 * clear "needs_copy" in the parent by allocating 3054 * a new amap. this is wrong because the 3055 * parent is supposed to be sharing the old amap 3056 * and the new amap will break that. 3057 * 3058 * 2. if the old entry has an amap and a non-zero 3059 * wire count then we are going to have to call 3060 * amap_cow_now to avoid page faults in the 3061 * parent process. since amap_cow_now requires 3062 * "needs_copy" to be clear we might as well 3063 * clear it here as well. 3064 * 3065 */ 3066 3067 if (old_entry->aref.ar_amap != NULL) { 3068 3069 if ((amap_flags(old_entry->aref.ar_amap) & 3070 AMAP_SHARED) != 0 || 3071 VM_MAPENT_ISWIRED(old_entry)) { 3072 3073 amap_copy(new_map, new_entry, M_WAITOK, FALSE, 3074 0, 0); 3075 /* XXXCDC: M_WAITOK ... ok? */ 3076 } 3077 } 3078 3079 /* 3080 * if the parent's entry is wired down, then the 3081 * parent process does not want page faults on 3082 * access to that memory. this means that we 3083 * cannot do copy-on-write because we can't write 3084 * protect the old entry. in this case we 3085 * resolve all copy-on-write faults now, using 3086 * amap_cow_now. note that we have already 3087 * allocated any needed amap (above). 3088 */ 3089 3090 if (VM_MAPENT_ISWIRED(old_entry)) { 3091 3092 /* 3093 * resolve all copy-on-write faults now 3094 * (note that there is nothing to do if 3095 * the old mapping does not have an amap). 3096 * XXX: is it worthwhile to bother with pmap_copy 3097 * in this case? 3098 */ 3099 if (old_entry->aref.ar_amap) 3100 amap_cow_now(new_map, new_entry); 3101 3102 } else { 3103 3104 /* 3105 * setup mappings to trigger copy-on-write faults 3106 * we must write-protect the parent if it has 3107 * an amap and it is not already "needs_copy"... 
				 * if it is already "needs_copy" then the parent
				 * has already been write-protected by a previous
				 * fork operation.
				 *
				 * if we do not write-protect the parent, then
				 * we must be sure to write-protect the child
				 * after the pmap_copy() operation.
				 *
				 * XXX: pmap_copy should have some way of telling
				 * us that it didn't do anything so we can avoid
				 * calling pmap_protect needlessly.
				 */

				if (old_entry->aref.ar_amap) {

					if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
						if (old_entry->max_protection & VM_PROT_WRITE) {
							pmap_protect(old_map->pmap,
							    old_entry->start,
							    old_entry->end,
							    old_entry->protection &
							    ~VM_PROT_WRITE);
						}
						old_entry->etype |= UVM_ET_NEEDSCOPY;
					}

					/*
					 * parent must now be write-protected
					 */
					protect_child = FALSE;
				} else {

					/*
					 * we only need to protect the child if the
					 * parent has write access.
					 */
					if (old_entry->max_protection & VM_PROT_WRITE)
						protect_child = TRUE;
					else
						protect_child = FALSE;

				}

				/*
				 * copy the mappings
				 * XXX: need a way to tell if this does anything
				 */

				pmap_copy(new_pmap, old_map->pmap,
				    new_entry->start,
				    (old_entry->end - old_entry->start),
				    old_entry->start);

				/*
				 * protect the child's mappings if necessary
				 */
				if (protect_child) {
					pmap_protect(new_pmap, new_entry->start,
					    new_entry->end,
					    new_entry->protection &
					    ~VM_PROT_WRITE);
				}

			}
			break;
		}  /* end of switch statement */
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	vm_map_unlock(old_map);

#ifdef SYSVSHM
	if (vm1->vm_shm)
		shmfork(vm1, vm2);
#endif

#ifdef PMAP_FORK
	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
#endif

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
	return(vm2);
}
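
/*
 * Illustrative sketch (assumption, not this file's API): fork1()
 * obtains the child's address space by calling uvmspace_fork(),
 * which applies each entry's inheritance code as described above.
 */
#if 0	/* example only, not compiled */
static void
example_fork_vm(struct proc *parent, struct proc *child)
{
	/* build the child's vmspace from the parent's, entry by entry */
	child->p_vmspace = uvmspace_fork(parent->p_vmspace);
}
#endif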

#if defined(DDB)

/*
 * DDB hooks
 */

/*
 * uvm_map_printit: actually prints the map
 */

void
uvm_map_printit(map, full, pr)
	vm_map_t map;
	boolean_t full;
	int (*pr) __P((const char *, ...));
{
	vm_map_entry_t entry;

	(*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
	(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
	    map->nentries, map->size, map->ref_count, map->timestamp,
	    map->flags);
#ifdef pmap_resident_count
	(*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
	    pmap_resident_count(map->pmap));
#else
	/* XXXCDC: this should be required ... */
	(*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
#endif
	if (!full)
		return;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		(*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
		    entry, entry->start, entry->end, entry->object.uvm_obj,
		    (long long)entry->offset, entry->aref.ar_amap,
		    entry->aref.ar_pageoff);
		(*pr)(
		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
		    "wc=%d, adv=%d\n",
		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
		    entry->protection, entry->max_protection,
		    entry->inheritance, entry->wired_count, entry->advice);
	}
}

/*
 * uvm_object_printit: actually prints the object
 */

void
uvm_object_printit(uobj, full, pr)
	struct uvm_object *uobj;
	boolean_t full;
	int (*pr) __P((const char *, ...));
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, uobj->vmobjlock.lock_data, uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq), cnt++) {
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 2) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 2) {
		(*pr)("\n");
	}
}
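
/*
 * Illustrative note (assumption about the surrounding tree): these
 * hooks are declared in uvm_ddb.h and are intended to back ddb(4)
 * "show" commands, e.g. something along the lines of:
 *
 *	db> show map/f <map address>
 *
 * where a "full" modifier requests the per-entry listing produced
 * above.  The exact command syntax is ddb's, not this file's.
 */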

/*
 * uvm_page_printit: actually print the page
 */

static const char page_flagbits[] =
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5CLEANCHK\6RELEASED\7FAKE\10RDONLY"
	"\11ZERO\15PAGER1";
static const char page_pqflagbits[] =
	"\20\1FREE\2INACTIVE\3ACTIVE\4LAUNDRY\5ANON\6AOBJ";

void
uvm_page_printit(pg, full, pr)
	struct vm_page *pg;
	boolean_t full;
	int (*pr) __P((const char *, ...));
{
	struct vm_page *tpg;
	struct uvm_object *uobj;
	struct pglist *pgl;
	char pgbuf[128];
	char pqbuf[128];

	(*pr)("PAGE %p:\n", pg);
	snprintf(pgbuf, sizeof(pgbuf), "%b", pg->flags, page_flagbits);
	snprintf(pqbuf, sizeof(pqbuf), "%b", pg->pqflags, page_pqflagbits);
	(*pr)("  flags=%s, pqflags=%s, vers=%d, wire_count=%d, pa=0x%lx\n",
	    pgbuf, pqbuf, pg->version, pg->wire_count, (long)pg->phys_addr);
	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
#if defined(UVM_PAGE_TRKOWN)
	if (pg->flags & PG_BUSY)
		(*pr)("  owning process = %d, tag=%s\n",
		    pg->owner, pg->owner_tag);
	else
		(*pr)("  page not busy, no owner\n");
#else
	(*pr)("  [page ownership tracking disabled]\n");
#endif

	if (!full)
		return;

	/* cross-verify object/anon */
	if ((pg->pqflags & PQ_FREE) == 0) {
		if (pg->pqflags & PQ_ANON) {
			if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
				(*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
				    (pg->uanon) ? pg->uanon->u.an_page : NULL);
			else
				(*pr)("  anon backpointer is OK\n");
		} else {
			uobj = pg->uobject;
			if (uobj) {
				(*pr)("  checking object list\n");
				TAILQ_FOREACH(tpg, &uobj->memq, listq) {
					if (tpg == pg) {
						break;
					}
				}
				if (tpg)
					(*pr)("  page found on object list\n");
				else
					(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
			}
		}
	}

	/* cross-verify page queue */
	if (pg->pqflags & PQ_FREE) {
		int fl = uvm_page_lookup_freelist(pg);
		pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
		    PGFL_ZEROS : PGFL_UNKNOWN];
	} else if (pg->pqflags & PQ_INACTIVE) {
		pgl = (pg->pqflags & PQ_SWAPBACKED) ?
		    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
	} else if (pg->pqflags & PQ_ACTIVE) {
		pgl = &uvm.page_active;
	} else {
		pgl = NULL;
	}

	if (pgl) {
		(*pr)("  checking pageq list\n");
		TAILQ_FOREACH(tpg, pgl, pageq) {
			if (tpg == pg) {
				break;
			}
		}
		if (tpg)
			(*pr)("  page found on pageq list\n");
		else
			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
	}
}
#endif