/*	$NetBSD: uvm_mmap.c,v 1.145 2013/09/11 18:26:14 martin Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *	@(#)vm_mmap.c	8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.145 2013/09/11 18:26:14 martin Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/stat.h>

#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#if defined(PAX_ASLR) || defined(PAX_MPROTECT)
#include <sys/pax.h>
#endif /* PAX_ASLR || PAX_MPROTECT */

#include <miscfs/specfs/specdev.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

static int
range_test(vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	vaddr_t vm_max_address = VM_MAXUSER_ADDRESS;
	vaddr_t eaddr = addr + size;
	int res = 0;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;

#ifdef MD_MMAP_RANGE_TEST
	res = MD_MMAP_RANGE_TEST(addr, eaddr);
#endif

	return res;
}
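/*
 * Illustrative note (not from the original source): the wrap check above
 * relies on unsigned overflow.  On a 32-bit VA, for example,
 * range_test(0xfffff000, 0x2000, true) computes eaddr = 0x1000, which is
 * below addr, so an mmap-style caller gets EOVERFLOW.
 */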
/*
 * unimplemented VM system calls:
 */

/*
 * sys_sbrk: sbrk system call.
 */

/* ARGSUSED */
int
sys_sbrk(struct lwp *l, const struct sys_sbrk_args *uap, register_t *retval)
{
	/* {
		syscallarg(intptr_t) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_sstk: sstk system call.
 */

/* ARGSUSED */
int
sys_sstk(struct lwp *l, const struct sys_sstk_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) incr;
	} */

	return (ENOSYS);
}

/*
 * sys_mincore: determine if pages are in core or not.
 */

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0, npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return (EINVAL);
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return (EINVAL);

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		    (entry->next == &map->header ||
		     entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					subyte(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap);
		if (uobj != NULL)
			mutex_enter(uobj->vmobjlock);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first. */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) subyte(vec, pgi);
		}
		if (uobj != NULL)
			mutex_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return (error);
}
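/*
 * Userland usage sketch (illustrative, not part of the original file):
 *
 *	char vec[npgs];
 *	if (mincore(addr, len, vec) == 0) {
 *		// vec[i] != 0 means page i of the range is resident
 *	}
 */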
/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fix up the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */
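	/*
	 * Worked example of the adjustment above (illustrative, assuming
	 * PAGE_SIZE is 4096): pos = 0x1234 gives pageoff = 0x234, pos is
	 * truncated to 0x1000 and size grows by 0x234 before rounding, so
	 * the mapping still covers every byte the caller asked for.
	 */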
	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (off_t)(pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			    "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			    "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable or append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				vn_lock(vp, LK_SHARED | LK_RETRY);
				error = VOP_GETATTR(vp, &va, l->l_cred);
				VOP_UNLOCK(vp);
				if (error) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			}
			else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}
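	/*
	 * Illustrative note (not from the original source): at this point
	 * "maxprot" caps what a later mprotect(2) may grant.  Because the
	 * MAP_PRIVATE branch always adds VM_PROT_WRITE, a file opened
	 * read-only but mapped private can still be made writable later;
	 * the writes land in anonymous copies, never back in the file.
	 */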
#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}
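/*
 * Userland usage sketch (illustrative, not part of the original file):
 *
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, off);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 *
 * "off" need not be page aligned: the kernel maps from trunc_page(off)
 * and returns p already advanced by the sub-page offset.
 */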
/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	/*
	 * get map
	 */

	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}
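/*
 * Usage sketch (illustrative, not part of the original file): flush a
 * dirty shared file mapping and wait for the writes to complete:
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		err(1, "msync");
 *
 * MS_ASYNC queues the page-outs without waiting; MS_INVALIDATE
 * additionally frees the cached pages (PGO_FREE above).
 */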
/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    false);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}
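/*
 * Illustrative note (not from the original source): minherit(2) controls
 * what a child sees after fork(2).  For example, marking a region
 * VM_INHERIT_SHARE makes parent and child share those pages, instead of
 * the default copy-on-write duplication.
 */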
/*
 * sys_madvise: give advice about memory usage.
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return (EINVAL);

	default:
		return (EINVAL);
	}

	return error;
}
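/*
 * Illustrative note (not from the original source): the distinction
 * between MADV_DONTNEED and MADV_FREE above matters.  DONTNEED only
 * deactivates pages, so their contents survive being touched again;
 * FREE discards contents and any swap, so a later touch of anonymous
 * memory typically sees zero-filled pages rather than the old data.
 */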
/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}
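/*
 * Illustrative note (not from the original source): mlock(2) fails with
 * EAGAIN when the request would either push the system past
 * uvmexp.wiredmax or push the caller past RLIMIT_MEMLOCK; wiring is
 * accounted per page, so locking len bytes costs atop(round_page(len))
 * wired pages.
 */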
/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - handle is a vnode pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 */

int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, voff_t foff, vsize_t locklimit)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	vaddr_t align = 0;
	int error;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;
	bool needwritemap;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);
	if (foff & PAGE_MASK)
		return(EINVAL);
	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that the
	 * alignment is at least a page-sized quantity.  If the request
	 * was for a fixed mapping, make sure the supplied address adheres
	 * to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return(EINVAL);
		align = 1L << align;
		if (align < PAGE_SIZE)
			return(EINVAL);
		if (align >= vm_map_max(map))
			return(ENOMEM);
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return(EINVAL);
			align = 0;
		}
	}

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	    curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		KASSERT(handle == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(handle != NULL);
		vp = (struct vnode *)handle;

		/*
		 * Don't allow mmap for EXEC if the file system
		 * is mounted NOEXEC.
		 */
		if ((prot & PROT_EXEC) != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0)
			return (EACCES);

		if (vp->v_type != VCHR) {
			error = VOP_MMAP(vp, prot, curlwp->l_cred);
			if (error) {
				return error;
			}
			vref(vp);
			uobj = &vp->v_uobj;

			/*
			 * If the vnode is being mapped with PROT_EXEC,
			 * then mark it as text.
			 */
			if (prot & PROT_EXEC) {
				vn_markexec(vp);
			}
		} else {
			int i = maxprot;

			/*
			 * XXX Some devices don't like to be mapped with
			 * XXX PROT_EXEC or PROT_WRITE, but we don't really
			 * XXX have a better way of handling this, right now
			 */
			do {
				uobj = udv_attach((void *) &vp->v_rdev,
				    (flags & MAP_SHARED) ? i :
				    (i & ~VM_PROT_WRITE), foff, size);
				i--;
			} while ((uobj == NULL) && (i > 0));
			if (uobj == NULL)
				return EINVAL;
			advice = UVM_ADV_RANDOM;
		}
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}

		/*
		 * Set vnode flags to indicate the new kinds of mapping.
		 * We take the vnode lock in exclusive mode here to serialize
		 * with direct I/O.
		 *
		 * Safe to check for these flag values without a lock, as
		 * long as a reference to the vnode is held.
		 */
		needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
		    (flags & MAP_SHARED) != 0 &&
		    (maxprot & VM_PROT_WRITE) != 0;
		if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vp->v_vflag |= VV_MAPPED;
			if (needwritemap) {
				mutex_enter(vp->v_interlock);
				vp->v_iflag |= VI_WRMAP;
				mutex_exit(vp->v_interlock);
			}
			VOP_UNLOCK(vp);
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
	    advice, uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return (0);
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return (0);
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
{

	return VM_DEFAULT_ADDRESS(base, sz);
}