/*
 * (MPSAFE)
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <sys/thread.h>
#include <vm/vm_page2.h>

static int max_proc_mmap = 1000000;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
int vkernel_enable;
SYSCTL_INT(_vm, OID_AUTO, vkernel_enable, CTLFLAG_RW, &vkernel_enable, 0, "");

/*
 * sstk_args(int incr)
 *
 * MPSAFE
 */
int
sys_sstk(struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *		long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 *
 * No requirements
 */
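/*
 * Minimal userland sketch of the semantics described above, assuming
 * "fd" is an open descriptor for a regular file and PAGE_SIZE is 4096:
 * an offset of 0x1234 yields pageoff = 0x234, the mapping itself begins
 * at file offset 0x1000, and the pointer handed back to the caller is
 * the mapped base plus 0x234.
 *
 *	void *anon = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *			  MAP_ANON | MAP_PRIVATE, -1, 0);
 *	void *file = mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 *
 *	if (anon == MAP_FAILED || file == MAP_FAILED)
 *		err(1, "mmap");
 */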
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (size == 0)
		return (EINVAL);

	if (flags & MAP_STACK) {
		if (fd != -1)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

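	/*
	 * Worked example of the adjustment above (illustrative only,
	 * assuming PAGE_SIZE is 4096): for upos = 0x1234 and ulen = 0x2000,
	 * pageoff = 0x234, pos becomes 0x1000 and size becomes
	 * round_page(0x2000 + 0x234) = 0x3000, so the mapping covers every
	 * page touched by the caller's original [0x1234, 0x3234) range.
	 */
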
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Get a hint of where to map. It also provides mmap offset
		 * randomization if enabled.
		 */
		addr = vm_map_hint(p, addr, prot);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(td, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch(vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 *
			 * PROT_WRITE + MAP_SHARED
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;

						/*
						 * SHARED+RW file mmap()
						 * updates v_lastwrite_ts.
						 */
						if ((prot & PROT_WRITE) &&
						    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
							vfs_timestamp(&vp->v_lastwrite_ts);
							vsetflags(vp, VLASTWRITETS);
							vn_unlock(vp);
						}
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	lwkt_gettoken(&vms->vm_map.token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  0 to disable.
	 */
	if (max_proc_mmap && vms->vm_map.nentries >= max_proc_mmap) {
		error = ENOMEM;
		lwkt_reltoken(&vms->vm_map.token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vms->vm_map.token);
done:
	if (fp)
		dropfp(td, fd, fp);

	return (error);
}

/*
 * mmap system call handler
 *
 * No requirements.
 */
int
sys_mmap(struct mmap_args *uap)
{
	int error;

	error = kern_mmap(curproc->p_vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->pos, &uap->sysmsg_resultp);

	return (error);
}

/*
 * msync system call handler
 *
 * msync_args(void *addr, size_t len, int flags)
 *
 * No requirements
 */
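/*
 * Minimal userland sketch, assuming "base" and "length" describe a range
 * previously obtained from mmap(): flush the dirty pages synchronously.
 * Note that MS_ASYNC and MS_INVALIDATE may not be combined; that pair is
 * rejected with EINVAL below.
 *
 *	if (msync(base, length, MS_SYNC) == -1)
 *		err(1, "msync");
 */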
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * map->token serializes extracting the address range for size == 0
	 * msyncs with the vm_map_clean call; if the token were not held
	 * across the two calls, an intervening munmap/mmap pair, for example,
	 * could cause msync to occur on a wrong region.
	 */
	lwkt_gettoken(&map->token);

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		if (rv == FALSE) {
			vm_map_unlock_read(map);
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		addr = entry->start;
		size = entry->end - entry->start;
		vm_map_unlock_read(map);
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);
done:
	lwkt_reltoken(&map->token);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * munmap system call handler
 *
 * munmap_args(void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* map->token serializes between the map check and the actual unmap */
	lwkt_gettoken(&map->token);

	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size,
				     VM_PROT_NONE, FALSE)) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	lwkt_reltoken(&map->token);
	return (0);
}

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 *
 * No requirements.
 */
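/*
 * Minimal userland sketch, assuming "addr" and "len" describe a
 * page-aligned range obtained from mmap(): drop write permission once
 * the region has been initialized.
 *
 *	if (mprotect(addr, len, PROT_READ) == -1)
 *		err(1, "mprotect");
 */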
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
			       prot, FALSE)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * minherit system call handler
 *
 * minherit_args(void *addr, size_t len, int inherit)
 *
 * No requirements.
 */
int
sys_minherit(struct minherit_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr,
			       addr + size, inherit)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * madvise system call handler
 *
 * madvise_args(void *addr, size_t len, int behav)
 *
 * No requirements.
 */
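/*
 * Minimal userland sketch, assuming "addr" and "len" come from a prior
 * mmap(): hint that the region will be needed soon, and later that its
 * contents may be discarded.
 *
 *	(void)madvise(addr, len, MADV_WILLNEED);
 *	(void)madvise(addr, len, MADV_FREE);
 */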
int
sys_madvise(struct madvise_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav >= MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t)uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, 0);
	return (error);
}

/*
 * mcontrol system call handler
 *
 * mcontrol_args(void *addr, size_t len, int behav, off_t value)
 *
 * No requirements
 */
int
sys_mcontrol(struct mcontrol_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t) uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, uap->value);
	return (error);
}


/*
 * mincore system call handler
 *
 * mincore_args(const void *addr, size_t len, char *vec)
 *
 * No requirements
 */
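/*
 * Minimal userland sketch, assuming "addr" and "len" describe a mapped,
 * page-aligned range: the kernel fills one byte per page, and a set
 * MINCORE_INCORE bit means that page is resident.
 *
 *	char *vec = malloc(len / PAGE_SIZE);
 *
 *	if (vec == NULL || mincore(addr, len, vec) == -1)
 *		err(1, "mincore");
 *	if (vec[0] & MINCORE_INCORE)
 *		printf("first page is resident\n");
 */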
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (end < addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	lwkt_gettoken(&map->token);
	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for(current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if (current->maptype != VM_MAPTYPE_NORMAL &&
		    current->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}
		if (current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 *
			 * If we have to check the VM object, only mess
			 * around with normal maps.  Do not mess around
			 * with virtual page tables (XXX).
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (mincoreinfo == 0 &&
			    current->maptype == VM_MAPTYPE_NORMAL) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather
				 * information about it.  spl protection is
				 * required to maintain the object
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 *
				 * XXX vm_token - legacy for pmap_ts_referenced
				 *     in x86 and vkernel pmap code.
				 */
				lwkt_gettoken(&vm_token);
				vm_object_hold(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
						   pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty || pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_object_drop(current->object.vm_object);
				lwkt_reltoken(&vm_token);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make
			 * sure that the byte vector is zeroed for those
			 * skipped entries.
			 */
			while((lastvecindex + 1) < vecindex) {
				error = subyte( vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done;
			}

			/*
			 * If the map has changed, due to the subyte,
			 * the previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while((lastvecindex + 1) < vecindex) {
		error = subyte( vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	error = 0;
done:
	lwkt_reltoken(&map->token);
	return (error);
}

/*
 * mlock system call handler
 *
 * mlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
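/*
 * Minimal userland sketch, assuming "buf" and "buflen" describe a mapped
 * range: pin the pages so they are not paged out.  The request is subject
 * to RLIMIT_MEMLOCK (or to PRIV_ROOT when pmap_wired_count is not
 * available), as checked below.
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 *	(void)munlock(buf, buflen);
 */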
int
sys_mlock(struct mlock_args *uap)
{
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return (EINVAL);
	if (size == 0)			/* silently allow 0 size */
		return (0);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

	if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

	/*
	 * We do not need to synchronize against other threads updating ucred;
	 * they update p->ucred, which is synchronized into td_ucred ourselves.
	 */
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) {
		return (ENOMEM);
	}
#else
	error = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (error) {
		return (error);
	}
#endif
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * mlockall(int how)
 *
 * No requirements
 */
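/*
 * Minimal userland sketch: request that all future mappings be wired.
 * Note that in this implementation MCL_CURRENT is not supported and the
 * call then fails with ENOSYS, so only the MCL_FUTURE form is useful.
 *
 *	if (mlockall(MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 */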
int
sys_mlockall(struct mlockall_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int how = uap->how;
	int rc = KERN_SUCCESS;

	if (((how & MCL_CURRENT) == 0) && ((how & MCL_FUTURE) == 0))
		return (EINVAL);

	rc = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (rc)
		return (rc);

	vm_map_lock(map);
	do {
		if (how & MCL_CURRENT) {
			for(entry = map->header.next;
			    entry != &map->header;
			    entry = entry->next);

			rc = ENOSYS;
			break;
		}

		if (how & MCL_FUTURE)
			map->flags |= MAP_WIREFUTURE;
	} while(0);
	vm_map_unlock(map);

	return (rc);
}

/*
 * munlockall(void)
 *
 * Unwire all user-wired map entries, cancel MCL_FUTURE.
 *
 * No requirements
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int rc = KERN_SUCCESS;

	vm_map_lock(map);

	/* Clear MAP_WIREFUTURE to cancel mlockall(MCL_FUTURE) */
	map->flags &= ~MAP_WIREFUTURE;

retry:
	for (entry = map->header.next;
	     entry != &map->header;
	     entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
			continue;

		/*
		 * If we encounter an in-transition entry, we release the
		 * map lock and retry the scan; we do not decrement any
		 * wired_count more than once because we do not touch
		 * any entries with MAP_ENTRY_USER_WIRED not set.
		 *
		 * There is a potential interleaving with concurrent
		 * mlockall()s here -- if we abort a scan, an mlockall()
		 * could start, wire a number of entries before our
		 * current position in, and then stall itself on this
		 * or any other in-transition entry.  If that occurs, when
		 * we resume, we will unwire those entries.
		 */
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map, 1);
			goto retry;
		}

		KASSERT(entry->wired_count > 0,
			("wired_count was 0 with USER_WIRED set! %p", entry));

		/* Drop wired count, if it hits zero, unwire the entry */
		entry->eflags &= ~MAP_ENTRY_USER_WIRED;
		entry->wired_count--;
		if (entry->wired_count == 0)
			vm_fault_unwire(map, entry);
	}

	vm_map_unlock(map);

	return (rc);
}

/*
 * munlock system call handler
 *
 * munlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munlock(struct munlock_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	tmpaddr = addr + size;
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);
	if (size == 0)			/* silently allow 0 size */
		return (0);

#ifndef pmap_wired_count
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 *
 * No requirements
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t esize;
	vm_size_t align;
	int (*uksmap)(cdev_t dev, vm_page_t fake);
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;
	int error;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&map->token);

	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&map->token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 *
	 * Also align any large mapping (bigger than 16x SEG_SIZE) to a
	 * SEG_SIZE address boundary.
	 */
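	/*
	 * The MAP_SIZEALIGN test below requires a power-of-2 size: for
	 * example, with align = 0x20000, align ^ (align - 1) == 0x3ffff ==
	 * (align << 1) - 1 and the request is accepted, while a
	 * non-power-of-2 size such as 0x30000 yields 0x1ffff and is
	 * rejected with EINVAL.
	 */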
	if (flags & MAP_SIZEALIGN) {
		align = size;
		if ((align ^ (align - 1)) != (align << 1) - 1) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
	} else if ((flags & MAP_FIXED) == 0 &&
		   ((size & SEG_MASK) == 0 || size > SEG_SIZE * 16)) {
		align = SEG_SIZE;
	} else {
		align = PAGE_SIZE;
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	uksmap = NULL;

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&map->token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;

		/*
		 * Non-anonymous mappings of VCHR (aka not /dev/zero)
		 * cannot specify MAP_STACK or MAP_VPAGETABLE.
		 */
		if (vp->v_type == VCHR) {
			if (flags & (MAP_STACK | MAP_VPAGETABLE)) {
				lwkt_reltoken(&map->token);
				return(EINVAL);
			}
		}

		if (vp->v_type == VCHR && vp->v_rdev->si_ops->d_uksmap) {
			/*
			 * Device mappings without a VM object, typically
			 * sharing permanently allocated kernel memory or
			 * process-context-specific (per-process) data.
			 *
			 * Force them to be shared.
			 */
			uksmap = vp->v_rdev->si_ops->d_uksmap;
			object = NULL;
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			error = dev_dmmap_single(vp->v_rdev, &foff, objsize,
						 &object, prot, NULL);

			if (error == ENODEV) {
				handle = (void *)(intptr_t)vp->v_rdev;
				object = dev_pager_alloc(handle, objsize, prot, foff);
				if (object == NULL) {
					lwkt_reltoken(&map->token);
					return(EINVAL);
				}
			} else if (error) {
				lwkt_reltoken(&map->token);
				return(error);
			}

			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  mmapable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&map->token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&map->token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 *
	 * (object can be NULL)
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (uksmap) {
		rv = vm_map_find(map, uksmap, vp->v_rdev,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_UKSMAP, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	} else if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_VPAGETABLE, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_NORMAL, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 *
		 * (NOTE: object can be NULL)
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/* If a process has marked all future mappings for wiring, do so */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_unwire(map, *addr, *addr + size, FALSE);

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&map->token);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

/*
 * Translate a Mach VM return code to zero on success or the appropriate errno
 * on failure.
 */
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}