1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $ 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/buf.h> 45 #include <sys/conf.h> 46 #include <sys/sysent.h> 47 #include <sys/malloc.h> 48 #include <sys/mount.h> 49 #include <sys/mountctl.h> 50 #include <sys/sysproto.h> 51 #include <sys/filedesc.h> 52 #include <sys/kernel.h> 53 #include <sys/fcntl.h> 54 #include <sys/file.h> 55 #include <sys/linker.h> 56 #include <sys/stat.h> 57 #include <sys/unistd.h> 58 #include <sys/vnode.h> 59 #include <sys/proc.h> 60 #include <sys/priv.h> 61 #include <sys/jail.h> 62 #include <sys/namei.h> 63 #include <sys/nlookup.h> 64 #include <sys/dirent.h> 65 #include <sys/extattr.h> 66 #include <sys/spinlock.h> 67 #include <sys/kern_syscall.h> 68 #include <sys/objcache.h> 69 #include <sys/sysctl.h> 70 71 #include <sys/buf2.h> 72 #include <sys/file2.h> 73 #include <sys/spinlock2.h> 74 #include <sys/mplock2.h> 75 76 #include <vm/vm.h> 77 #include <vm/vm_object.h> 78 #include <vm/vm_page.h> 79 80 #include <machine/limits.h> 81 #include <machine/stdarg.h> 82 83 #include <vfs/union/union.h> 84 85 static void mount_warning(struct mount *mp, const char *ctl, ...) 
86 __printflike(2, 3); 87 static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb); 88 static int checkvp_chdir (struct vnode *vn, struct thread *td); 89 static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch); 90 static int chroot_refuse_vdir_fds (struct filedesc *fdp); 91 static int chroot_visible_mnt(struct mount *mp, struct proc *p); 92 static int getutimes (const struct timeval *, struct timespec *); 93 static int setfown (struct mount *, struct vnode *, uid_t, gid_t); 94 static int setfmode (struct vnode *, int); 95 static int setfflags (struct vnode *, int); 96 static int setutimes (struct vnode *, struct vattr *, 97 const struct timespec *, int); 98 static int usermount = 0; /* if 1, non-root can mount fs. */ 99 100 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *); 101 102 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, 103 "Allow non-root users to mount filesystems"); 104 105 /* 106 * Virtual File System System Calls 107 */ 108 109 /* 110 * Mount a file system. 111 * 112 * mount_args(char *type, char *path, int flags, caddr_t data) 113 * 114 * MPALMOSTSAFE 115 */ 116 int 117 sys_mount(struct mount_args *uap) 118 { 119 struct thread *td = curthread; 120 struct vnode *vp; 121 struct nchandle nch; 122 struct mount *mp, *nullmp; 123 struct vfsconf *vfsp; 124 int error, flag = 0, flag2 = 0; 125 int hasmount; 126 struct vattr va; 127 struct nlookupdata nd; 128 char fstypename[MFSNAMELEN]; 129 struct ucred *cred; 130 131 cred = td->td_ucred; 132 if (jailed(cred)) { 133 error = EPERM; 134 goto done; 135 } 136 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 137 goto done; 138 139 /* 140 * Do not allow NFS export by non-root users. 141 */ 142 if (uap->flags & MNT_EXPORTED) { 143 error = priv_check(td, PRIV_ROOT); 144 if (error) 145 goto done; 146 } 147 /* 148 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users 149 */ 150 if (priv_check(td, PRIV_ROOT)) 151 uap->flags |= MNT_NOSUID | MNT_NODEV; 152 153 /* 154 * Lookup the requested path and extract the nch and vnode. 155 */ 156 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 157 if (error == 0) { 158 if ((error = nlookup(&nd)) == 0) { 159 if (nd.nl_nch.ncp->nc_vp == NULL) 160 error = ENOENT; 161 } 162 } 163 if (error) { 164 nlookup_done(&nd); 165 goto done; 166 } 167 168 /* 169 * If the target filesystem is resolved via a nullfs mount, then 170 * nd.nl_nch.mount will be pointing to the nullfs mount structure 171 * instead of the target file system. We need it in case we are 172 * doing an update. 173 */ 174 nullmp = nd.nl_nch.mount; 175 176 /* 177 * Extract the locked+refd ncp and cleanup the nd structure 178 */ 179 nch = nd.nl_nch; 180 cache_zero(&nd.nl_nch); 181 nlookup_done(&nd); 182 183 if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && 184 (mp = cache_findmount(&nch)) != NULL) { 185 cache_dropmount(mp); 186 hasmount = 1; 187 } else { 188 hasmount = 0; 189 } 190 191 192 /* 193 * now we have the locked ref'd nch and unreferenced vnode. 194 */ 195 vp = nch.ncp->nc_vp; 196 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) { 197 cache_put(&nch); 198 goto done; 199 } 200 cache_unlock(&nch); 201 202 /* 203 * Extract the file system type. We need to know this early, to take 204 * appropriate actions if we are dealing with a nullfs. 
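 *
 * Illustrative example (not part of the original source): an update of
 * a nullfs mount, e.g.
 *
 *	mount("null", "/mnt/obj", MNT_UPDATE, &args);
 *
 * must operate on the nullfs mount structure (nullmp, saved above)
 * rather than on the underlying filesystem's mount, which is why the
 * type string is copied in before the MNT_UPDATE handling below.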
205 */ 206 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) { 207 cache_drop(&nch); 208 vput(vp); 209 goto done; 210 } 211 212 /* 213 * Now we have an unlocked ref'd nch and a locked ref'd vp 214 */ 215 if (uap->flags & MNT_UPDATE) { 216 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 217 cache_drop(&nch); 218 vput(vp); 219 error = EINVAL; 220 goto done; 221 } 222 223 if (strncmp(fstypename, "null", 5) == 0) { 224 KKASSERT(nullmp); 225 mp = nullmp; 226 } else { 227 mp = vp->v_mount; 228 } 229 230 flag = mp->mnt_flag; 231 flag2 = mp->mnt_kern_flag; 232 /* 233 * We only allow the filesystem to be reloaded if it 234 * is currently mounted read-only. 235 */ 236 if ((uap->flags & MNT_RELOAD) && 237 ((mp->mnt_flag & MNT_RDONLY) == 0)) { 238 cache_drop(&nch); 239 vput(vp); 240 error = EOPNOTSUPP; /* Needs translation */ 241 goto done; 242 } 243 /* 244 * Only root, or the user that did the original mount is 245 * permitted to update it. 246 */ 247 if (mp->mnt_stat.f_owner != cred->cr_uid && 248 (error = priv_check(td, PRIV_ROOT))) { 249 cache_drop(&nch); 250 vput(vp); 251 goto done; 252 } 253 if (vfs_busy(mp, LK_NOWAIT)) { 254 cache_drop(&nch); 255 vput(vp); 256 error = EBUSY; 257 goto done; 258 } 259 if (hasmount) { 260 cache_drop(&nch); 261 vfs_unbusy(mp); 262 vput(vp); 263 error = EBUSY; 264 goto done; 265 } 266 mp->mnt_flag |= 267 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE); 268 lwkt_gettoken(&mp->mnt_token); 269 vn_unlock(vp); 270 goto update; 271 } 272 273 /* 274 * If the user is not root, ensure that they own the directory 275 * onto which we are attempting to mount. 276 */ 277 if ((error = VOP_GETATTR(vp, &va)) || 278 (va.va_uid != cred->cr_uid && 279 (error = priv_check(td, PRIV_ROOT)))) { 280 cache_drop(&nch); 281 vput(vp); 282 goto done; 283 } 284 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) { 285 cache_drop(&nch); 286 vput(vp); 287 goto done; 288 } 289 if (vp->v_type != VDIR) { 290 cache_drop(&nch); 291 vput(vp); 292 error = ENOTDIR; 293 goto done; 294 } 295 if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) { 296 cache_drop(&nch); 297 vput(vp); 298 error = EPERM; 299 goto done; 300 } 301 vfsp = vfsconf_find_by_name(fstypename); 302 if (vfsp == NULL) { 303 linker_file_t lf; 304 305 /* Only load modules for root (very important!) */ 306 if ((error = priv_check(td, PRIV_ROOT)) != 0) { 307 cache_drop(&nch); 308 vput(vp); 309 goto done; 310 } 311 error = linker_load_file(fstypename, &lf); 312 if (error || lf == NULL) { 313 cache_drop(&nch); 314 vput(vp); 315 if (lf == NULL) 316 error = ENODEV; 317 goto done; 318 } 319 lf->userrefs++; 320 /* lookup again, see if the VFS was loaded */ 321 vfsp = vfsconf_find_by_name(fstypename); 322 if (vfsp == NULL) { 323 lf->userrefs--; 324 linker_file_unload(lf); 325 cache_drop(&nch); 326 vput(vp); 327 error = ENODEV; 328 goto done; 329 } 330 } 331 if (hasmount) { 332 cache_drop(&nch); 333 vput(vp); 334 error = EBUSY; 335 goto done; 336 } 337 338 /* 339 * Allocate and initialize the filesystem. 
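 *
 * (Illustrative aside, not part of the original source: the vfsconf
 * entry found above with vfsconf_find_by_name() is what a filesystem
 * module registers at load time, typically via something like
 *
 *	static struct vfsops null_vfsops = { ... };
 *	VFS_SET(null_vfsops, null, 0);
 *
 * which is why retrying the lookup after linker_load_file() can
 * succeed.)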
340 */ 341 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK); 342 mount_init(mp); 343 vfs_busy(mp, LK_NOWAIT); 344 mp->mnt_op = vfsp->vfc_vfsops; 345 mp->mnt_vfc = vfsp; 346 vfsp->vfc_refcount++; 347 mp->mnt_stat.f_type = vfsp->vfc_typenum; 348 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 349 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 350 mp->mnt_stat.f_owner = cred->cr_uid; 351 lwkt_gettoken(&mp->mnt_token); 352 vn_unlock(vp); 353 update: 354 /* 355 * (per-mount token acquired at this point) 356 * 357 * Set the mount level flags. 358 */ 359 if (uap->flags & MNT_RDONLY) 360 mp->mnt_flag |= MNT_RDONLY; 361 else if (mp->mnt_flag & MNT_RDONLY) 362 mp->mnt_kern_flag |= MNTK_WANTRDWR; 363 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | 364 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME | 365 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 366 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 367 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | 368 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE | 369 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 370 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 371 /* 372 * Mount the filesystem. 373 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they 374 * get. 375 */ 376 error = VFS_MOUNT(mp, uap->path, uap->data, cred); 377 if (mp->mnt_flag & MNT_UPDATE) { 378 if (mp->mnt_kern_flag & MNTK_WANTRDWR) 379 mp->mnt_flag &= ~MNT_RDONLY; 380 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); 381 mp->mnt_kern_flag &=~ MNTK_WANTRDWR; 382 if (error) { 383 mp->mnt_flag = flag; 384 mp->mnt_kern_flag = flag2; 385 } 386 lwkt_reltoken(&mp->mnt_token); 387 vfs_unbusy(mp); 388 vrele(vp); 389 cache_drop(&nch); 390 goto done; 391 } 392 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 393 394 /* 395 * Put the new filesystem on the mount list after root. The mount 396 * point gets its own mnt_ncmountpt (unless the VFS already set one 397 * up) which represents the root of the mount. The lookup code 398 * detects the mount point going forward and checks the root of 399 * the mount going backwards. 400 * 401 * It is not necessary to invalidate or purge the vnode underneath 402 * because elements under the mount will be given their own glue 403 * namecache record. 404 */ 405 if (!error) { 406 if (mp->mnt_ncmountpt.ncp == NULL) { 407 /* 408 * allocate, then unlock, but leave the ref intact 409 */ 410 cache_allocroot(&mp->mnt_ncmountpt, mp, NULL); 411 cache_unlock(&mp->mnt_ncmountpt); 412 } 413 mp->mnt_ncmounton = nch; /* inherits ref */ 414 nch.ncp->nc_flag |= NCF_ISMOUNTPT; 415 cache_ismounting(mp); 416 417 mountlist_insert(mp, MNTINS_LAST); 418 vn_unlock(vp); 419 checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt); 420 error = vfs_allocate_syncvnode(mp); 421 lwkt_reltoken(&mp->mnt_token); 422 vfs_unbusy(mp); 423 error = VFS_START(mp, 0); 424 vrele(vp); 425 } else { 426 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 427 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 428 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 429 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 430 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 431 mp->mnt_vfc->vfc_refcount--; 432 lwkt_reltoken(&mp->mnt_token); 433 vfs_unbusy(mp); 434 kfree(mp, M_MOUNT); 435 cache_drop(&nch); 436 vput(vp); 437 } 438 done: 439 return (error); 440 } 441 442 /* 443 * Scan all active processes to see if any of them have a current 444 * or root directory onto which the new filesystem has just been 445 * mounted. 
If so, replace them with the new mount point. 446 * 447 * Both old_nch and new_nch are ref'd on call but not locked. 448 * new_nch must be temporarily locked so it can be associated with the 449 * vnode representing the root of the mount point. 450 */ 451 struct checkdirs_info { 452 struct nchandle old_nch; 453 struct nchandle new_nch; 454 struct vnode *old_vp; 455 struct vnode *new_vp; 456 }; 457 458 static int checkdirs_callback(struct proc *p, void *data); 459 460 static void 461 checkdirs(struct nchandle *old_nch, struct nchandle *new_nch) 462 { 463 struct checkdirs_info info; 464 struct vnode *olddp; 465 struct vnode *newdp; 466 struct mount *mp; 467 468 /* 469 * If the old mount point's vnode has a usecount of 1, it is not 470 * being held as a descriptor anywhere. 471 */ 472 olddp = old_nch->ncp->nc_vp; 473 if (olddp == NULL || olddp->v_sysref.refcnt == 1) 474 return; 475 476 /* 477 * Force the root vnode of the new mount point to be resolved 478 * so we can update any matching processes. 479 */ 480 mp = new_nch->mount; 481 if (VFS_ROOT(mp, &newdp)) 482 panic("mount: lost mount"); 483 vn_unlock(newdp); 484 cache_lock(new_nch); 485 vn_lock(newdp, LK_EXCLUSIVE | LK_RETRY); 486 cache_setunresolved(new_nch); 487 cache_setvp(new_nch, newdp); 488 cache_unlock(new_nch); 489 490 /* 491 * Special handling of the root node 492 */ 493 if (rootvnode == olddp) { 494 vref(newdp); 495 vfs_cache_setroot(newdp, cache_hold(new_nch)); 496 } 497 498 /* 499 * Pass newdp separately so the callback does not have to access 500 * it via new_nch->ncp->nc_vp. 501 */ 502 info.old_nch = *old_nch; 503 info.new_nch = *new_nch; 504 info.new_vp = newdp; 505 allproc_scan(checkdirs_callback, &info); 506 vput(newdp); 507 } 508 509 /* 510 * NOTE: callback is not MP safe because the scanned process's filedesc 511 * structure can be ripped out from under us, amoung other things. 512 */ 513 static int 514 checkdirs_callback(struct proc *p, void *data) 515 { 516 struct checkdirs_info *info = data; 517 struct filedesc *fdp; 518 struct nchandle ncdrop1; 519 struct nchandle ncdrop2; 520 struct vnode *vprele1; 521 struct vnode *vprele2; 522 523 if ((fdp = p->p_fd) != NULL) { 524 cache_zero(&ncdrop1); 525 cache_zero(&ncdrop2); 526 vprele1 = NULL; 527 vprele2 = NULL; 528 529 /* 530 * MPUNSAFE - XXX fdp can be pulled out from under a 531 * foreign process. 532 * 533 * A shared filedesc is ok, we don't have to copy it 534 * because we are making this change globally. 535 */ 536 spin_lock(&fdp->fd_spin); 537 if (fdp->fd_ncdir.mount == info->old_nch.mount && 538 fdp->fd_ncdir.ncp == info->old_nch.ncp) { 539 vprele1 = fdp->fd_cdir; 540 vref(info->new_vp); 541 fdp->fd_cdir = info->new_vp; 542 ncdrop1 = fdp->fd_ncdir; 543 cache_copy(&info->new_nch, &fdp->fd_ncdir); 544 } 545 if (fdp->fd_nrdir.mount == info->old_nch.mount && 546 fdp->fd_nrdir.ncp == info->old_nch.ncp) { 547 vprele2 = fdp->fd_rdir; 548 vref(info->new_vp); 549 fdp->fd_rdir = info->new_vp; 550 ncdrop2 = fdp->fd_nrdir; 551 cache_copy(&info->new_nch, &fdp->fd_nrdir); 552 } 553 spin_unlock(&fdp->fd_spin); 554 if (ncdrop1.ncp) 555 cache_drop(&ncdrop1); 556 if (ncdrop2.ncp) 557 cache_drop(&ncdrop2); 558 if (vprele1) 559 vrele(vprele1); 560 if (vprele2) 561 vrele(vprele2); 562 } 563 return(0); 564 } 565 566 /* 567 * Unmount a file system. 568 * 569 * Note: unmount takes a path to the vnode mounted on as argument, 570 * not special file (as before). 
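 *
 * Illustrative example (not part of the original source):
 *
 *	unmount("/mnt", MNT_FORCE);
 *
 * names the mount point directory itself, not the device that was
 * mounted on it.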
571 * 572 * umount_args(char *path, int flags) 573 * 574 * MPALMOSTSAFE 575 */ 576 int 577 sys_unmount(struct unmount_args *uap) 578 { 579 struct thread *td = curthread; 580 struct proc *p __debugvar = td->td_proc; 581 struct mount *mp = NULL; 582 struct nlookupdata nd; 583 int error; 584 585 KKASSERT(p); 586 get_mplock(); 587 if (td->td_ucred->cr_prison != NULL) { 588 error = EPERM; 589 goto done; 590 } 591 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 592 goto done; 593 594 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 595 if (error == 0) 596 error = nlookup(&nd); 597 if (error) 598 goto out; 599 600 mp = nd.nl_nch.mount; 601 602 /* 603 * Only root, or the user that did the original mount is 604 * permitted to unmount this filesystem. 605 */ 606 if ((mp->mnt_stat.f_owner != td->td_ucred->cr_uid) && 607 (error = priv_check(td, PRIV_ROOT))) 608 goto out; 609 610 /* 611 * Don't allow unmounting the root file system. 612 */ 613 if (mp->mnt_flag & MNT_ROOTFS) { 614 error = EINVAL; 615 goto out; 616 } 617 618 /* 619 * Must be the root of the filesystem 620 */ 621 if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) { 622 error = EINVAL; 623 goto out; 624 } 625 626 out: 627 nlookup_done(&nd); 628 if (error == 0) 629 error = dounmount(mp, uap->flags); 630 done: 631 rel_mplock(); 632 return (error); 633 } 634 635 /* 636 * Do the actual file system unmount. 637 */ 638 static int 639 dounmount_interlock(struct mount *mp) 640 { 641 if (mp->mnt_kern_flag & MNTK_UNMOUNT) 642 return (EBUSY); 643 mp->mnt_kern_flag |= MNTK_UNMOUNT; 644 return(0); 645 } 646 647 static int 648 unmount_allproc_cb(struct proc *p, void *arg) 649 { 650 struct mount *mp; 651 652 if (p->p_textnch.ncp == NULL) 653 return 0; 654 655 mp = (struct mount *)arg; 656 if (p->p_textnch.mount == mp) 657 cache_drop(&p->p_textnch); 658 659 return 0; 660 } 661 662 int 663 dounmount(struct mount *mp, int flags) 664 { 665 struct namecache *ncp; 666 struct nchandle nch; 667 struct vnode *vp; 668 int error; 669 int async_flag; 670 int lflags; 671 int freeok = 1; 672 int retry; 673 674 lwkt_gettoken(&mp->mnt_token); 675 /* 676 * Exclusive access for unmounting purposes 677 */ 678 if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0) 679 goto out; 680 681 /* 682 * Allow filesystems to detect that a forced unmount is in progress. 683 */ 684 if (flags & MNT_FORCE) 685 mp->mnt_kern_flag |= MNTK_UNMOUNTF; 686 lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_TIMELOCK); 687 error = lockmgr(&mp->mnt_lock, lflags); 688 if (error) { 689 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); 690 if (mp->mnt_kern_flag & MNTK_MWAIT) { 691 mp->mnt_kern_flag &= ~MNTK_MWAIT; 692 wakeup(mp); 693 } 694 goto out; 695 } 696 697 if (mp->mnt_flag & MNT_EXPUBLIC) 698 vfs_setpublicfs(NULL, NULL, NULL); 699 700 vfs_msync(mp, MNT_WAIT); 701 async_flag = mp->mnt_flag & MNT_ASYNC; 702 mp->mnt_flag &=~ MNT_ASYNC; 703 704 /* 705 * If this filesystem isn't aliasing other filesystems, 706 * try to invalidate any remaining namecache entries and 707 * check the count afterwords. 
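 *
 * (The expected count is 1, i.e. only the reference held through
 * mp->mnt_ncmountpt itself; a higher count means namecache entries
 * under the mount are still referenced, which either fails the
 * unmount with EBUSY or, for MNT_FORCE, prevents the mount structure
 * from being freed.)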
708 */ 709 if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) { 710 cache_lock(&mp->mnt_ncmountpt); 711 cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN); 712 cache_unlock(&mp->mnt_ncmountpt); 713 714 if ((ncp = mp->mnt_ncmountpt.ncp) != NULL && 715 (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) { 716 allproc_scan(&unmount_allproc_cb, mp); 717 } 718 719 if ((ncp = mp->mnt_ncmountpt.ncp) != NULL && 720 (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) { 721 722 if ((flags & MNT_FORCE) == 0) { 723 error = EBUSY; 724 mount_warning(mp, "Cannot unmount: " 725 "%d namecache " 726 "references still " 727 "present", 728 ncp->nc_refs - 1); 729 } else { 730 mount_warning(mp, "Forced unmount: " 731 "%d namecache " 732 "references still " 733 "present", 734 ncp->nc_refs - 1); 735 freeok = 0; 736 } 737 } 738 } 739 740 /* 741 * Decomission our special mnt_syncer vnode. This also stops 742 * the vnlru code. If we are unable to unmount we recommission 743 * the vnode. 744 * 745 * Then sync the filesystem. 746 */ 747 if ((vp = mp->mnt_syncer) != NULL) { 748 mp->mnt_syncer = NULL; 749 vrele(vp); 750 } 751 if ((mp->mnt_flag & MNT_RDONLY) == 0) 752 VFS_SYNC(mp, MNT_WAIT); 753 754 /* 755 * nchandle records ref the mount structure. Expect a count of 1 756 * (our mount->mnt_ncmountpt). 757 * 758 * Scans can get temporary refs on a mountpoint (thought really 759 * heavy duty stuff like cache_findmount() do not). 760 */ 761 for (retry = 0; retry < 10 && mp->mnt_refs != 1; ++retry) { 762 cache_unmounting(mp); 763 tsleep(&mp->mnt_refs, 0, "mntbsy", hz / 10 + 1); 764 } 765 if (mp->mnt_refs != 1) { 766 if ((flags & MNT_FORCE) == 0) { 767 mount_warning(mp, "Cannot unmount: " 768 "%d mount refs still present", 769 mp->mnt_refs); 770 error = EBUSY; 771 } else { 772 mount_warning(mp, "Forced unmount: " 773 "%d mount refs still present", 774 mp->mnt_refs); 775 freeok = 0; 776 } 777 } 778 779 /* 780 * So far so good, sync the filesystem once more and 781 * call the VFS unmount code if the sync succeeds. 782 */ 783 if (error == 0) { 784 if (((mp->mnt_flag & MNT_RDONLY) || 785 (error = VFS_SYNC(mp, MNT_WAIT)) == 0) || 786 (flags & MNT_FORCE)) { 787 error = VFS_UNMOUNT(mp, flags); 788 } 789 } 790 791 /* 792 * If an error occurred we can still recover, restoring the 793 * syncer vnode and misc flags. 794 */ 795 if (error) { 796 if (mp->mnt_syncer == NULL) 797 vfs_allocate_syncvnode(mp); 798 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); 799 mp->mnt_flag |= async_flag; 800 lockmgr(&mp->mnt_lock, LK_RELEASE); 801 if (mp->mnt_kern_flag & MNTK_MWAIT) { 802 mp->mnt_kern_flag &= ~MNTK_MWAIT; 803 wakeup(mp); 804 } 805 goto out; 806 } 807 /* 808 * Clean up any journals still associated with the mount after 809 * filesystem activity has ceased. 810 */ 811 journal_remove_all_journals(mp, 812 ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0)); 813 814 mountlist_remove(mp); 815 816 /* 817 * Remove any installed vnode ops here so the individual VFSs don't 818 * have to. 
819 */ 820 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 821 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 822 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 823 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 824 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 825 826 if (mp->mnt_ncmountpt.ncp != NULL) { 827 nch = mp->mnt_ncmountpt; 828 cache_zero(&mp->mnt_ncmountpt); 829 cache_clrmountpt(&nch); 830 cache_drop(&nch); 831 } 832 if (mp->mnt_ncmounton.ncp != NULL) { 833 cache_unmounting(mp); 834 nch = mp->mnt_ncmounton; 835 cache_zero(&mp->mnt_ncmounton); 836 cache_clrmountpt(&nch); 837 cache_drop(&nch); 838 } 839 840 mp->mnt_vfc->vfc_refcount--; 841 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) 842 panic("unmount: dangling vnode"); 843 lockmgr(&mp->mnt_lock, LK_RELEASE); 844 if (mp->mnt_kern_flag & MNTK_MWAIT) { 845 mp->mnt_kern_flag &= ~MNTK_MWAIT; 846 wakeup(mp); 847 } 848 849 /* 850 * If we reach here and freeok != 0 we must free the mount. 851 * If refs > 1 cycle and wait, just in case someone tried 852 * to busy the mount after we decided to do the unmount. 853 */ 854 if (freeok) { 855 while (mp->mnt_refs > 1) { 856 cache_unmounting(mp); 857 wakeup(mp); 858 tsleep(&mp->mnt_refs, 0, "umntrwait", hz / 10 + 1); 859 } 860 lwkt_reltoken(&mp->mnt_token); 861 kfree(mp, M_MOUNT); 862 mp = NULL; 863 } 864 error = 0; 865 out: 866 if (mp) 867 lwkt_reltoken(&mp->mnt_token); 868 return (error); 869 } 870 871 static 872 void 873 mount_warning(struct mount *mp, const char *ctl, ...) 874 { 875 char *ptr; 876 char *buf; 877 __va_list va; 878 879 __va_start(va, ctl); 880 if (cache_fullpath(NULL, &mp->mnt_ncmounton, NULL, 881 &ptr, &buf, 0) == 0) { 882 kprintf("unmount(%s): ", ptr); 883 kvprintf(ctl, va); 884 kprintf("\n"); 885 kfree(buf, M_TEMP); 886 } else { 887 kprintf("unmount(%p", mp); 888 if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name) 889 kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name); 890 kprintf("): "); 891 kvprintf(ctl, va); 892 kprintf("\n"); 893 } 894 __va_end(va); 895 } 896 897 /* 898 * Shim cache_fullpath() to handle the case where a process is chrooted into 899 * a subdirectory of a mount. In this case if the root mount matches the 900 * process root directory's mount we have to specify the process's root 901 * directory instead of the mount point, because the mount point might 902 * be above the root directory. 903 */ 904 static 905 int 906 mount_path(struct proc *p, struct mount *mp, char **rb, char **fb) 907 { 908 struct nchandle *nch; 909 910 if (p && p->p_fd->fd_nrdir.mount == mp) 911 nch = &p->p_fd->fd_nrdir; 912 else 913 nch = &mp->mnt_ncmountpt; 914 return(cache_fullpath(p, nch, NULL, rb, fb, 0)); 915 } 916 917 /* 918 * Sync each mounted filesystem. 919 */ 920 921 #ifdef DEBUG 922 static int syncprt = 0; 923 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, ""); 924 #endif /* DEBUG */ 925 926 static int sync_callback(struct mount *mp, void *data); 927 928 int 929 sys_sync(struct sync_args *uap) 930 { 931 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD); 932 #ifdef DEBUG 933 /* 934 * print out buffer pool stat information on each sync() call. 
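 * (Controlled by the debug.syncprt sysctl declared above; only
 * compiled in when DEBUG is defined.)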
935 */ 936 if (syncprt) 937 vfs_bufstats(); 938 #endif /* DEBUG */ 939 return (0); 940 } 941 942 static 943 int 944 sync_callback(struct mount *mp, void *data __unused) 945 { 946 int asyncflag; 947 948 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 949 asyncflag = mp->mnt_flag & MNT_ASYNC; 950 mp->mnt_flag &= ~MNT_ASYNC; 951 vfs_msync(mp, MNT_NOWAIT); 952 VFS_SYNC(mp, MNT_NOWAIT); 953 mp->mnt_flag |= asyncflag; 954 } 955 return(0); 956 } 957 958 /* XXX PRISON: could be per prison flag */ 959 static int prison_quotas; 960 #if 0 961 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, ""); 962 #endif 963 964 /* 965 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg) 966 * 967 * Change filesystem quotas. 968 * 969 * MPALMOSTSAFE 970 */ 971 int 972 sys_quotactl(struct quotactl_args *uap) 973 { 974 struct nlookupdata nd; 975 struct thread *td; 976 struct mount *mp; 977 int error; 978 979 get_mplock(); 980 td = curthread; 981 if (td->td_ucred->cr_prison && !prison_quotas) { 982 error = EPERM; 983 goto done; 984 } 985 986 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 987 if (error == 0) 988 error = nlookup(&nd); 989 if (error == 0) { 990 mp = nd.nl_nch.mount; 991 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid, 992 uap->arg, nd.nl_cred); 993 } 994 nlookup_done(&nd); 995 done: 996 rel_mplock(); 997 return (error); 998 } 999 1000 /* 1001 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen, 1002 * void *buf, int buflen) 1003 * 1004 * This function operates on a mount point and executes the specified 1005 * operation using the specified control data, and possibly returns data. 1006 * 1007 * The actual number of bytes stored in the result buffer is returned, 0 1008 * if none, otherwise an error is returned. 1009 * 1010 * MPALMOSTSAFE 1011 */ 1012 int 1013 sys_mountctl(struct mountctl_args *uap) 1014 { 1015 struct thread *td = curthread; 1016 struct proc *p = td->td_proc; 1017 struct file *fp; 1018 void *ctl = NULL; 1019 void *buf = NULL; 1020 char *path = NULL; 1021 int error; 1022 1023 /* 1024 * Sanity and permissions checks. We must be root. 1025 */ 1026 KKASSERT(p); 1027 if (td->td_ucred->cr_prison != NULL) 1028 return (EPERM); 1029 if ((uap->op != MOUNTCTL_MOUNTFLAGS) && 1030 (error = priv_check(td, PRIV_ROOT)) != 0) 1031 return (error); 1032 1033 /* 1034 * Argument length checks 1035 */ 1036 if (uap->ctllen < 0 || uap->ctllen > 1024) 1037 return (EINVAL); 1038 if (uap->buflen < 0 || uap->buflen > 16 * 1024) 1039 return (EINVAL); 1040 if (uap->path == NULL) 1041 return (EINVAL); 1042 1043 /* 1044 * Allocate the necessary buffers and copyin data 1045 */ 1046 path = objcache_get(namei_oc, M_WAITOK); 1047 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 1048 if (error) 1049 goto done; 1050 1051 if (uap->ctllen) { 1052 ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO); 1053 error = copyin(uap->ctl, ctl, uap->ctllen); 1054 if (error) 1055 goto done; 1056 } 1057 if (uap->buflen) 1058 buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO); 1059 1060 /* 1061 * Validate the descriptor 1062 */ 1063 if (uap->fd >= 0) { 1064 fp = holdfp(p->p_fd, uap->fd, -1); 1065 if (fp == NULL) { 1066 error = EBADF; 1067 goto done; 1068 } 1069 } else { 1070 fp = NULL; 1071 } 1072 1073 /* 1074 * Execute the internal kernel function and clean up. 
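 *
 * Illustrative sketch (not part of the original source): a userland
 * caller that is not operating on an open descriptor passes fd = -1,
 * for example
 *
 *	int flags = 0;
 *	mountctl("/mnt", MOUNTCTL_MOUNTFLAGS, -1, NULL, 0,
 *		 &flags, sizeof(flags));
 *
 * (the exact ctl/buf formats depend on the operation;
 * MOUNTCTL_MOUNTFLAGS is the one operation above that does not
 * require PRIV_ROOT).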
1075 */ 1076 get_mplock(); 1077 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result); 1078 rel_mplock(); 1079 if (fp) 1080 fdrop(fp); 1081 if (error == 0 && uap->sysmsg_result > 0) 1082 error = copyout(buf, uap->buf, uap->sysmsg_result); 1083 done: 1084 if (path) 1085 objcache_put(namei_oc, path); 1086 if (ctl) 1087 kfree(ctl, M_TEMP); 1088 if (buf) 1089 kfree(buf, M_TEMP); 1090 return (error); 1091 } 1092 1093 /* 1094 * Execute a mount control operation by resolving the path to a mount point 1095 * and calling vop_mountctl(). 1096 * 1097 * Use the mount point from the nch instead of the vnode so nullfs mounts 1098 * can properly spike the VOP. 1099 */ 1100 int 1101 kern_mountctl(const char *path, int op, struct file *fp, 1102 const void *ctl, int ctllen, 1103 void *buf, int buflen, int *res) 1104 { 1105 struct vnode *vp; 1106 struct mount *mp; 1107 struct nlookupdata nd; 1108 int error; 1109 1110 *res = 0; 1111 vp = NULL; 1112 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW); 1113 if (error == 0) 1114 error = nlookup(&nd); 1115 if (error == 0) 1116 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 1117 mp = nd.nl_nch.mount; 1118 nlookup_done(&nd); 1119 if (error) 1120 return (error); 1121 vn_unlock(vp); 1122 1123 /* 1124 * Must be the root of the filesystem 1125 */ 1126 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 1127 vrele(vp); 1128 return (EINVAL); 1129 } 1130 error = vop_mountctl(mp->mnt_vn_use_ops, vp, op, fp, ctl, ctllen, 1131 buf, buflen, res); 1132 vrele(vp); 1133 return (error); 1134 } 1135 1136 int 1137 kern_statfs(struct nlookupdata *nd, struct statfs *buf) 1138 { 1139 struct thread *td = curthread; 1140 struct proc *p = td->td_proc; 1141 struct mount *mp; 1142 struct statfs *sp; 1143 char *fullpath, *freepath; 1144 int error; 1145 1146 if ((error = nlookup(nd)) != 0) 1147 return (error); 1148 mp = nd->nl_nch.mount; 1149 sp = &mp->mnt_stat; 1150 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0) 1151 return (error); 1152 1153 error = mount_path(p, mp, &fullpath, &freepath); 1154 if (error) 1155 return(error); 1156 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1157 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1158 kfree(freepath, M_TEMP); 1159 1160 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1161 bcopy(sp, buf, sizeof(*buf)); 1162 /* Only root should have access to the fsid's. */ 1163 if (priv_check(td, PRIV_ROOT)) 1164 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1165 return (0); 1166 } 1167 1168 /* 1169 * statfs_args(char *path, struct statfs *buf) 1170 * 1171 * Get filesystem statistics. 
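 *
 * Illustrative example (not part of the original source):
 *
 *	struct statfs sb;
 *	if (statfs("/usr", &sb) == 0)
 *		printf("%s mounted on %s\n", sb.f_mntfromname, sb.f_mntonname);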
1172 */ 1173 int 1174 sys_statfs(struct statfs_args *uap) 1175 { 1176 struct nlookupdata nd; 1177 struct statfs buf; 1178 int error; 1179 1180 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1181 if (error == 0) 1182 error = kern_statfs(&nd, &buf); 1183 nlookup_done(&nd); 1184 if (error == 0) 1185 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1186 return (error); 1187 } 1188 1189 int 1190 kern_fstatfs(int fd, struct statfs *buf) 1191 { 1192 struct thread *td = curthread; 1193 struct proc *p = td->td_proc; 1194 struct file *fp; 1195 struct mount *mp; 1196 struct statfs *sp; 1197 char *fullpath, *freepath; 1198 int error; 1199 1200 KKASSERT(p); 1201 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 1202 return (error); 1203 1204 /* 1205 * Try to use mount info from any overlays rather than the 1206 * mount info for the underlying vnode, otherwise we will 1207 * fail when operating on null-mounted paths inside a chroot. 1208 */ 1209 if ((mp = fp->f_nchandle.mount) == NULL) 1210 mp = ((struct vnode *)fp->f_data)->v_mount; 1211 if (mp == NULL) { 1212 error = EBADF; 1213 goto done; 1214 } 1215 if (fp->f_cred == NULL) { 1216 error = EINVAL; 1217 goto done; 1218 } 1219 sp = &mp->mnt_stat; 1220 if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0) 1221 goto done; 1222 1223 if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0) 1224 goto done; 1225 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1226 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1227 kfree(freepath, M_TEMP); 1228 1229 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1230 bcopy(sp, buf, sizeof(*buf)); 1231 1232 /* Only root should have access to the fsid's. */ 1233 if (priv_check(td, PRIV_ROOT)) 1234 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1235 error = 0; 1236 done: 1237 fdrop(fp); 1238 return (error); 1239 } 1240 1241 /* 1242 * fstatfs_args(int fd, struct statfs *buf) 1243 * 1244 * Get filesystem statistics. 1245 */ 1246 int 1247 sys_fstatfs(struct fstatfs_args *uap) 1248 { 1249 struct statfs buf; 1250 int error; 1251 1252 error = kern_fstatfs(uap->fd, &buf); 1253 1254 if (error == 0) 1255 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1256 return (error); 1257 } 1258 1259 int 1260 kern_statvfs(struct nlookupdata *nd, struct statvfs *buf) 1261 { 1262 struct mount *mp; 1263 struct statvfs *sp; 1264 int error; 1265 1266 if ((error = nlookup(nd)) != 0) 1267 return (error); 1268 mp = nd->nl_nch.mount; 1269 sp = &mp->mnt_vstat; 1270 if ((error = VFS_STATVFS(mp, sp, nd->nl_cred)) != 0) 1271 return (error); 1272 1273 sp->f_flag = 0; 1274 if (mp->mnt_flag & MNT_RDONLY) 1275 sp->f_flag |= ST_RDONLY; 1276 if (mp->mnt_flag & MNT_NOSUID) 1277 sp->f_flag |= ST_NOSUID; 1278 bcopy(sp, buf, sizeof(*buf)); 1279 return (0); 1280 } 1281 1282 /* 1283 * statfs_args(char *path, struct statfs *buf) 1284 * 1285 * Get filesystem statistics. 
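 *
 * Illustrative example (not part of the original source):
 *
 *	struct statvfs sb;
 *	if (statvfs("/usr", &sb) == 0 && (sb.f_flag & ST_RDONLY))
 *		printf("/usr is mounted read-only\n");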
1286 */ 1287 int 1288 sys_statvfs(struct statvfs_args *uap) 1289 { 1290 struct nlookupdata nd; 1291 struct statvfs buf; 1292 int error; 1293 1294 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1295 if (error == 0) 1296 error = kern_statvfs(&nd, &buf); 1297 nlookup_done(&nd); 1298 if (error == 0) 1299 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1300 return (error); 1301 } 1302 1303 int 1304 kern_fstatvfs(int fd, struct statvfs *buf) 1305 { 1306 struct thread *td = curthread; 1307 struct proc *p = td->td_proc; 1308 struct file *fp; 1309 struct mount *mp; 1310 struct statvfs *sp; 1311 int error; 1312 1313 KKASSERT(p); 1314 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 1315 return (error); 1316 if ((mp = fp->f_nchandle.mount) == NULL) 1317 mp = ((struct vnode *)fp->f_data)->v_mount; 1318 if (mp == NULL) { 1319 error = EBADF; 1320 goto done; 1321 } 1322 if (fp->f_cred == NULL) { 1323 error = EINVAL; 1324 goto done; 1325 } 1326 sp = &mp->mnt_vstat; 1327 if ((error = VFS_STATVFS(mp, sp, fp->f_cred)) != 0) 1328 goto done; 1329 1330 sp->f_flag = 0; 1331 if (mp->mnt_flag & MNT_RDONLY) 1332 sp->f_flag |= ST_RDONLY; 1333 if (mp->mnt_flag & MNT_NOSUID) 1334 sp->f_flag |= ST_NOSUID; 1335 1336 bcopy(sp, buf, sizeof(*buf)); 1337 error = 0; 1338 done: 1339 fdrop(fp); 1340 return (error); 1341 } 1342 1343 /* 1344 * fstatfs_args(int fd, struct statfs *buf) 1345 * 1346 * Get filesystem statistics. 1347 */ 1348 int 1349 sys_fstatvfs(struct fstatvfs_args *uap) 1350 { 1351 struct statvfs buf; 1352 int error; 1353 1354 error = kern_fstatvfs(uap->fd, &buf); 1355 1356 if (error == 0) 1357 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1358 return (error); 1359 } 1360 1361 /* 1362 * getfsstat_args(struct statfs *buf, long bufsize, int flags) 1363 * 1364 * Get statistics on all filesystems. 1365 */ 1366 1367 struct getfsstat_info { 1368 struct statfs *sfsp; 1369 long count; 1370 long maxcount; 1371 int error; 1372 int flags; 1373 struct thread *td; 1374 }; 1375 1376 static int getfsstat_callback(struct mount *, void *); 1377 1378 int 1379 sys_getfsstat(struct getfsstat_args *uap) 1380 { 1381 struct thread *td = curthread; 1382 struct getfsstat_info info; 1383 1384 bzero(&info, sizeof(info)); 1385 1386 info.maxcount = uap->bufsize / sizeof(struct statfs); 1387 info.sfsp = uap->buf; 1388 info.count = 0; 1389 info.flags = uap->flags; 1390 info.td = td; 1391 1392 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD); 1393 if (info.sfsp && info.count > info.maxcount) 1394 uap->sysmsg_result = info.maxcount; 1395 else 1396 uap->sysmsg_result = info.count; 1397 return (info.error); 1398 } 1399 1400 static int 1401 getfsstat_callback(struct mount *mp, void *data) 1402 { 1403 struct getfsstat_info *info = data; 1404 struct statfs *sp; 1405 char *freepath; 1406 char *fullpath; 1407 int error; 1408 1409 if (info->sfsp && info->count < info->maxcount) { 1410 if (info->td->td_proc && 1411 !chroot_visible_mnt(mp, info->td->td_proc)) { 1412 return(0); 1413 } 1414 sp = &mp->mnt_stat; 1415 1416 /* 1417 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1418 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1419 * overrides MNT_WAIT. 
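 *
 * (When VFS_STATFS() is skipped, the statistics already cached in
 * mp->mnt_stat from an earlier refresh are what gets copied out.)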
1420 */ 1421 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1422 (info->flags & MNT_WAIT)) && 1423 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1424 return(0); 1425 } 1426 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1427 1428 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1429 if (error) { 1430 info->error = error; 1431 return(-1); 1432 } 1433 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1434 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1435 kfree(freepath, M_TEMP); 1436 1437 error = copyout(sp, info->sfsp, sizeof(*sp)); 1438 if (error) { 1439 info->error = error; 1440 return (-1); 1441 } 1442 ++info->sfsp; 1443 } 1444 info->count++; 1445 return(0); 1446 } 1447 1448 /* 1449 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf, 1450 long bufsize, int flags) 1451 * 1452 * Get statistics on all filesystems. 1453 */ 1454 1455 struct getvfsstat_info { 1456 struct statfs *sfsp; 1457 struct statvfs *vsfsp; 1458 long count; 1459 long maxcount; 1460 int error; 1461 int flags; 1462 struct thread *td; 1463 }; 1464 1465 static int getvfsstat_callback(struct mount *, void *); 1466 1467 int 1468 sys_getvfsstat(struct getvfsstat_args *uap) 1469 { 1470 struct thread *td = curthread; 1471 struct getvfsstat_info info; 1472 1473 bzero(&info, sizeof(info)); 1474 1475 info.maxcount = uap->vbufsize / sizeof(struct statvfs); 1476 info.sfsp = uap->buf; 1477 info.vsfsp = uap->vbuf; 1478 info.count = 0; 1479 info.flags = uap->flags; 1480 info.td = td; 1481 1482 mountlist_scan(getvfsstat_callback, &info, MNTSCAN_FORWARD); 1483 if (info.vsfsp && info.count > info.maxcount) 1484 uap->sysmsg_result = info.maxcount; 1485 else 1486 uap->sysmsg_result = info.count; 1487 return (info.error); 1488 } 1489 1490 static int 1491 getvfsstat_callback(struct mount *mp, void *data) 1492 { 1493 struct getvfsstat_info *info = data; 1494 struct statfs *sp; 1495 struct statvfs *vsp; 1496 char *freepath; 1497 char *fullpath; 1498 int error; 1499 1500 if (info->vsfsp && info->count < info->maxcount) { 1501 if (info->td->td_proc && 1502 !chroot_visible_mnt(mp, info->td->td_proc)) { 1503 return(0); 1504 } 1505 sp = &mp->mnt_stat; 1506 vsp = &mp->mnt_vstat; 1507 1508 /* 1509 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1510 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1511 * overrides MNT_WAIT. 
1512 */ 1513 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1514 (info->flags & MNT_WAIT)) && 1515 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1516 return(0); 1517 } 1518 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1519 1520 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1521 (info->flags & MNT_WAIT)) && 1522 (error = VFS_STATVFS(mp, vsp, info->td->td_ucred))) { 1523 return(0); 1524 } 1525 vsp->f_flag = 0; 1526 if (mp->mnt_flag & MNT_RDONLY) 1527 vsp->f_flag |= ST_RDONLY; 1528 if (mp->mnt_flag & MNT_NOSUID) 1529 vsp->f_flag |= ST_NOSUID; 1530 1531 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1532 if (error) { 1533 info->error = error; 1534 return(-1); 1535 } 1536 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1537 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1538 kfree(freepath, M_TEMP); 1539 1540 error = copyout(sp, info->sfsp, sizeof(*sp)); 1541 if (error == 0) 1542 error = copyout(vsp, info->vsfsp, sizeof(*vsp)); 1543 if (error) { 1544 info->error = error; 1545 return (-1); 1546 } 1547 ++info->sfsp; 1548 ++info->vsfsp; 1549 } 1550 info->count++; 1551 return(0); 1552 } 1553 1554 1555 /* 1556 * fchdir_args(int fd) 1557 * 1558 * Change current working directory to a given file descriptor. 1559 */ 1560 int 1561 sys_fchdir(struct fchdir_args *uap) 1562 { 1563 struct thread *td = curthread; 1564 struct proc *p = td->td_proc; 1565 struct filedesc *fdp = p->p_fd; 1566 struct vnode *vp, *ovp; 1567 struct mount *mp; 1568 struct file *fp; 1569 struct nchandle nch, onch, tnch; 1570 int error; 1571 1572 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0) 1573 return (error); 1574 lwkt_gettoken(&p->p_token); 1575 vp = (struct vnode *)fp->f_data; 1576 vref(vp); 1577 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1578 if (fp->f_nchandle.ncp == NULL) 1579 error = ENOTDIR; 1580 else 1581 error = checkvp_chdir(vp, td); 1582 if (error) { 1583 vput(vp); 1584 goto done; 1585 } 1586 cache_copy(&fp->f_nchandle, &nch); 1587 1588 /* 1589 * If the ncp has become a mount point, traverse through 1590 * the mount point. 
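 *
 * (For example, fchdir() on a descriptor that was opened on a
 * directory before a filesystem was mounted on it should land in the
 * root of the new mount rather than in the covered directory; the
 * loop below follows such mounts, including stacked ones.)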
1591 */ 1592 1593 while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) && 1594 (mp = cache_findmount(&nch)) != NULL 1595 ) { 1596 error = nlookup_mp(mp, &tnch); 1597 if (error == 0) { 1598 cache_unlock(&tnch); /* leave ref intact */ 1599 vput(vp); 1600 vp = tnch.ncp->nc_vp; 1601 error = vget(vp, LK_SHARED); 1602 KKASSERT(error == 0); 1603 cache_drop(&nch); 1604 nch = tnch; 1605 } 1606 cache_dropmount(mp); 1607 } 1608 if (error == 0) { 1609 ovp = fdp->fd_cdir; 1610 onch = fdp->fd_ncdir; 1611 vn_unlock(vp); /* leave ref intact */ 1612 fdp->fd_cdir = vp; 1613 fdp->fd_ncdir = nch; 1614 cache_drop(&onch); 1615 vrele(ovp); 1616 } else { 1617 cache_drop(&nch); 1618 vput(vp); 1619 } 1620 fdrop(fp); 1621 done: 1622 lwkt_reltoken(&p->p_token); 1623 return (error); 1624 } 1625 1626 int 1627 kern_chdir(struct nlookupdata *nd) 1628 { 1629 struct thread *td = curthread; 1630 struct proc *p = td->td_proc; 1631 struct filedesc *fdp = p->p_fd; 1632 struct vnode *vp, *ovp; 1633 struct nchandle onch; 1634 int error; 1635 1636 if ((error = nlookup(nd)) != 0) 1637 return (error); 1638 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 1639 return (ENOENT); 1640 if ((error = vget(vp, LK_SHARED)) != 0) 1641 return (error); 1642 1643 lwkt_gettoken(&p->p_token); 1644 error = checkvp_chdir(vp, td); 1645 vn_unlock(vp); 1646 if (error == 0) { 1647 ovp = fdp->fd_cdir; 1648 onch = fdp->fd_ncdir; 1649 cache_unlock(&nd->nl_nch); /* leave reference intact */ 1650 fdp->fd_ncdir = nd->nl_nch; 1651 fdp->fd_cdir = vp; 1652 cache_drop(&onch); 1653 vrele(ovp); 1654 cache_zero(&nd->nl_nch); 1655 } else { 1656 vrele(vp); 1657 } 1658 lwkt_reltoken(&p->p_token); 1659 return (error); 1660 } 1661 1662 /* 1663 * chdir_args(char *path) 1664 * 1665 * Change current working directory (``.''). 1666 */ 1667 int 1668 sys_chdir(struct chdir_args *uap) 1669 { 1670 struct nlookupdata nd; 1671 int error; 1672 1673 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1674 if (error == 0) 1675 error = kern_chdir(&nd); 1676 nlookup_done(&nd); 1677 return (error); 1678 } 1679 1680 /* 1681 * Helper function for raised chroot(2) security function: Refuse if 1682 * any filedescriptors are open directories. 1683 */ 1684 static int 1685 chroot_refuse_vdir_fds(struct filedesc *fdp) 1686 { 1687 struct vnode *vp; 1688 struct file *fp; 1689 int error; 1690 int fd; 1691 1692 for (fd = 0; fd < fdp->fd_nfiles ; fd++) { 1693 if ((error = holdvnode(fdp, fd, &fp)) != 0) 1694 continue; 1695 vp = (struct vnode *)fp->f_data; 1696 if (vp->v_type != VDIR) { 1697 fdrop(fp); 1698 continue; 1699 } 1700 fdrop(fp); 1701 return(EPERM); 1702 } 1703 return (0); 1704 } 1705 1706 /* 1707 * This sysctl determines if we will allow a process to chroot(2) if it 1708 * has a directory open: 1709 * 0: disallowed for all processes. 1710 * 1: allowed for processes that were not already chroot(2)'ed. 1711 * 2: allowed for all processes. 1712 */ 1713 1714 static int chroot_allow_open_directories = 1; 1715 1716 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW, 1717 &chroot_allow_open_directories, 0, ""); 1718 1719 /* 1720 * chroot to the specified namecache entry. We obtain the vp from the 1721 * namecache data. The passed ncp must be locked and referenced and will 1722 * remain locked and referenced on return. 
1723 */ 1724 int 1725 kern_chroot(struct nchandle *nch) 1726 { 1727 struct thread *td = curthread; 1728 struct proc *p = td->td_proc; 1729 struct filedesc *fdp = p->p_fd; 1730 struct vnode *vp; 1731 int error; 1732 1733 /* 1734 * Only privileged user can chroot 1735 */ 1736 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1737 if (error) 1738 return (error); 1739 1740 /* 1741 * Disallow open directory descriptors (fchdir() breakouts). 1742 */ 1743 if (chroot_allow_open_directories == 0 || 1744 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) { 1745 if ((error = chroot_refuse_vdir_fds(fdp)) != 0) 1746 return (error); 1747 } 1748 if ((vp = nch->ncp->nc_vp) == NULL) 1749 return (ENOENT); 1750 1751 if ((error = vget(vp, LK_SHARED)) != 0) 1752 return (error); 1753 1754 /* 1755 * Check the validity of vp as a directory to change to and 1756 * associate it with rdir/jdir. 1757 */ 1758 error = checkvp_chdir(vp, td); 1759 vn_unlock(vp); /* leave reference intact */ 1760 if (error == 0) { 1761 vrele(fdp->fd_rdir); 1762 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */ 1763 cache_drop(&fdp->fd_nrdir); 1764 cache_copy(nch, &fdp->fd_nrdir); 1765 if (fdp->fd_jdir == NULL) { 1766 fdp->fd_jdir = vp; 1767 vref(fdp->fd_jdir); 1768 cache_copy(nch, &fdp->fd_njdir); 1769 } 1770 } else { 1771 vrele(vp); 1772 } 1773 return (error); 1774 } 1775 1776 /* 1777 * chroot_args(char *path) 1778 * 1779 * Change notion of root (``/'') directory. 1780 */ 1781 int 1782 sys_chroot(struct chroot_args *uap) 1783 { 1784 struct thread *td __debugvar = curthread; 1785 struct nlookupdata nd; 1786 int error; 1787 1788 KKASSERT(td->td_proc); 1789 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1790 if (error == 0) { 1791 nd.nl_flags |= NLC_EXEC; 1792 error = nlookup(&nd); 1793 if (error == 0) 1794 error = kern_chroot(&nd.nl_nch); 1795 } 1796 nlookup_done(&nd); 1797 return(error); 1798 } 1799 1800 int 1801 sys_chroot_kernel(struct chroot_kernel_args *uap) 1802 { 1803 struct thread *td = curthread; 1804 struct nlookupdata nd; 1805 struct nchandle *nch; 1806 struct vnode *vp; 1807 int error; 1808 1809 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1810 if (error) 1811 goto error_nond; 1812 1813 error = nlookup(&nd); 1814 if (error) 1815 goto error_out; 1816 1817 nch = &nd.nl_nch; 1818 1819 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1820 if (error) 1821 goto error_out; 1822 1823 if ((vp = nch->ncp->nc_vp) == NULL) { 1824 error = ENOENT; 1825 goto error_out; 1826 } 1827 1828 if ((error = cache_vref(nch, nd.nl_cred, &vp)) != 0) 1829 goto error_out; 1830 1831 kprintf("chroot_kernel: set new rootnch/rootvnode to %s\n", uap->path); 1832 get_mplock(); 1833 vfs_cache_setroot(vp, cache_hold(nch)); 1834 rel_mplock(); 1835 1836 error_out: 1837 nlookup_done(&nd); 1838 error_nond: 1839 return(error); 1840 } 1841 1842 /* 1843 * Common routine for chroot and chdir. Given a locked, referenced vnode, 1844 * determine whether it is legal to chdir to the vnode. The vnode's state 1845 * is not changed by this call. 
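 *
 * ("Legal" here means the vnode is a directory and the caller has
 * search (execute) permission on it, as checked with VOP_EACCESS()
 * below.)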
1846 */ 1847 int 1848 checkvp_chdir(struct vnode *vp, struct thread *td) 1849 { 1850 int error; 1851 1852 if (vp->v_type != VDIR) 1853 error = ENOTDIR; 1854 else 1855 error = VOP_EACCESS(vp, VEXEC, td->td_ucred); 1856 return (error); 1857 } 1858 1859 int 1860 kern_open(struct nlookupdata *nd, int oflags, int mode, int *res) 1861 { 1862 struct thread *td = curthread; 1863 struct proc *p = td->td_proc; 1864 struct lwp *lp = td->td_lwp; 1865 struct filedesc *fdp = p->p_fd; 1866 int cmode, flags; 1867 struct file *nfp; 1868 struct file *fp; 1869 struct vnode *vp; 1870 int type, indx, error = 0; 1871 struct flock lf; 1872 1873 if ((oflags & O_ACCMODE) == O_ACCMODE) 1874 return (EINVAL); 1875 flags = FFLAGS(oflags); 1876 error = falloc(lp, &nfp, NULL); 1877 if (error) 1878 return (error); 1879 fp = nfp; 1880 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; 1881 1882 /* 1883 * XXX p_dupfd is a real mess. It allows a device to return a 1884 * file descriptor to be duplicated rather then doing the open 1885 * itself. 1886 */ 1887 lp->lwp_dupfd = -1; 1888 1889 /* 1890 * Call vn_open() to do the lookup and assign the vnode to the 1891 * file pointer. vn_open() does not change the ref count on fp 1892 * and the vnode, on success, will be inherited by the file pointer 1893 * and unlocked. 1894 */ 1895 nd->nl_flags |= NLC_LOCKVP; 1896 error = vn_open(nd, fp, flags, cmode); 1897 nlookup_done(nd); 1898 if (error) { 1899 /* 1900 * handle special fdopen() case. bleh. dupfdopen() is 1901 * responsible for dropping the old contents of ofiles[indx] 1902 * if it succeeds. 1903 * 1904 * Note that fsetfd() will add a ref to fp which represents 1905 * the fd_files[] assignment. We must still drop our 1906 * reference. 1907 */ 1908 if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) { 1909 if (fdalloc(p, 0, &indx) == 0) { 1910 error = dupfdopen(fdp, indx, lp->lwp_dupfd, flags, error); 1911 if (error == 0) { 1912 *res = indx; 1913 fdrop(fp); /* our ref */ 1914 return (0); 1915 } 1916 fsetfd(fdp, NULL, indx); 1917 } 1918 } 1919 fdrop(fp); /* our ref */ 1920 if (error == ERESTART) 1921 error = EINTR; 1922 return (error); 1923 } 1924 1925 /* 1926 * ref the vnode for ourselves so it can't be ripped out from under 1927 * is. XXX need an ND flag to request that the vnode be returned 1928 * anyway. 1929 * 1930 * Reserve a file descriptor but do not assign it until the open 1931 * succeeds. 1932 */ 1933 vp = (struct vnode *)fp->f_data; 1934 vref(vp); 1935 if ((error = fdalloc(p, 0, &indx)) != 0) { 1936 fdrop(fp); 1937 vrele(vp); 1938 return (error); 1939 } 1940 1941 /* 1942 * If no error occurs the vp will have been assigned to the file 1943 * pointer. 1944 */ 1945 lp->lwp_dupfd = 0; 1946 1947 if (flags & (O_EXLOCK | O_SHLOCK)) { 1948 lf.l_whence = SEEK_SET; 1949 lf.l_start = 0; 1950 lf.l_len = 0; 1951 if (flags & O_EXLOCK) 1952 lf.l_type = F_WRLCK; 1953 else 1954 lf.l_type = F_RDLCK; 1955 if (flags & FNONBLOCK) 1956 type = 0; 1957 else 1958 type = F_WAIT; 1959 1960 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 1961 /* 1962 * lock request failed. Clean up the reserved 1963 * descriptor. 1964 */ 1965 vrele(vp); 1966 fsetfd(fdp, NULL, indx); 1967 fdrop(fp); 1968 return (error); 1969 } 1970 fp->f_flag |= FHASLOCK; 1971 } 1972 #if 0 1973 /* 1974 * Assert that all regular file vnodes were created with a object. 
1975 */ 1976 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 1977 ("open: regular file has no backing object after vn_open")); 1978 #endif 1979 1980 vrele(vp); 1981 1982 /* 1983 * release our private reference, leaving the one associated with the 1984 * descriptor table intact. 1985 */ 1986 fsetfd(fdp, fp, indx); 1987 fdrop(fp); 1988 *res = indx; 1989 if (oflags & O_CLOEXEC) 1990 error = fsetfdflags(fdp, *res, UF_EXCLOSE); 1991 return (error); 1992 } 1993 1994 /* 1995 * open_args(char *path, int flags, int mode) 1996 * 1997 * Check permissions, allocate an open file structure, 1998 * and call the device open routine if any. 1999 */ 2000 int 2001 sys_open(struct open_args *uap) 2002 { 2003 struct nlookupdata nd; 2004 int error; 2005 2006 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2007 if (error == 0) { 2008 error = kern_open(&nd, uap->flags, 2009 uap->mode, &uap->sysmsg_result); 2010 } 2011 nlookup_done(&nd); 2012 return (error); 2013 } 2014 2015 /* 2016 * openat_args(int fd, char *path, int flags, int mode) 2017 */ 2018 int 2019 sys_openat(struct openat_args *uap) 2020 { 2021 struct nlookupdata nd; 2022 int error; 2023 struct file *fp; 2024 2025 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2026 if (error == 0) { 2027 error = kern_open(&nd, uap->flags, uap->mode, 2028 &uap->sysmsg_result); 2029 } 2030 nlookup_done_at(&nd, fp); 2031 return (error); 2032 } 2033 2034 int 2035 kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor) 2036 { 2037 struct thread *td = curthread; 2038 struct proc *p = td->td_proc; 2039 struct vnode *vp; 2040 struct vattr vattr; 2041 int error; 2042 int whiteout = 0; 2043 2044 KKASSERT(p); 2045 2046 VATTR_NULL(&vattr); 2047 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2048 vattr.va_rmajor = rmajor; 2049 vattr.va_rminor = rminor; 2050 2051 switch (mode & S_IFMT) { 2052 case S_IFMT: /* used by badsect to flag bad sectors */ 2053 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_BAD, 0); 2054 vattr.va_type = VBAD; 2055 break; 2056 case S_IFCHR: 2057 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2058 vattr.va_type = VCHR; 2059 break; 2060 case S_IFBLK: 2061 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2062 vattr.va_type = VBLK; 2063 break; 2064 case S_IFWHT: 2065 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_WHT, 0); 2066 whiteout = 1; 2067 break; 2068 case S_IFDIR: /* special directories support for HAMMER */ 2069 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_DIR, 0); 2070 vattr.va_type = VDIR; 2071 break; 2072 default: 2073 error = EINVAL; 2074 break; 2075 } 2076 2077 if (error) 2078 return (error); 2079 2080 bwillinode(1); 2081 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2082 if ((error = nlookup(nd)) != 0) 2083 return (error); 2084 if (nd->nl_nch.ncp->nc_vp) 2085 return (EEXIST); 2086 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2087 return (error); 2088 2089 if (whiteout) { 2090 error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp, 2091 nd->nl_cred, NAMEI_CREATE); 2092 } else { 2093 vp = NULL; 2094 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, 2095 &vp, nd->nl_cred, &vattr); 2096 if (error == 0) 2097 vput(vp); 2098 } 2099 return (error); 2100 } 2101 2102 /* 2103 * mknod_args(char *path, int mode, int dev) 2104 * 2105 * Create a special file. 
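 *
 * Illustrative example (not part of the original source):
 *
 *	mknod("/dev/mynull", S_IFCHR | 0666, makedev(22, 2));
 *
 * The packed dev argument is split back into major/minor numbers with
 * umajor()/uminor() below; the device numbers here are arbitrary.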
2106 */ 2107 int 2108 sys_mknod(struct mknod_args *uap) 2109 { 2110 struct nlookupdata nd; 2111 int error; 2112 2113 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2114 if (error == 0) { 2115 error = kern_mknod(&nd, uap->mode, 2116 umajor(uap->dev), uminor(uap->dev)); 2117 } 2118 nlookup_done(&nd); 2119 return (error); 2120 } 2121 2122 /* 2123 * mknodat_args(int fd, char *path, mode_t mode, dev_t dev) 2124 * 2125 * Create a special file. The path is relative to the directory associated 2126 * with fd. 2127 */ 2128 int 2129 sys_mknodat(struct mknodat_args *uap) 2130 { 2131 struct nlookupdata nd; 2132 struct file *fp; 2133 int error; 2134 2135 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2136 if (error == 0) { 2137 error = kern_mknod(&nd, uap->mode, 2138 umajor(uap->dev), uminor(uap->dev)); 2139 } 2140 nlookup_done_at(&nd, fp); 2141 return (error); 2142 } 2143 2144 int 2145 kern_mkfifo(struct nlookupdata *nd, int mode) 2146 { 2147 struct thread *td = curthread; 2148 struct proc *p = td->td_proc; 2149 struct vattr vattr; 2150 struct vnode *vp; 2151 int error; 2152 2153 bwillinode(1); 2154 2155 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2156 if ((error = nlookup(nd)) != 0) 2157 return (error); 2158 if (nd->nl_nch.ncp->nc_vp) 2159 return (EEXIST); 2160 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2161 return (error); 2162 2163 VATTR_NULL(&vattr); 2164 vattr.va_type = VFIFO; 2165 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2166 vp = NULL; 2167 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr); 2168 if (error == 0) 2169 vput(vp); 2170 return (error); 2171 } 2172 2173 /* 2174 * mkfifo_args(char *path, int mode) 2175 * 2176 * Create a named pipe. 2177 */ 2178 int 2179 sys_mkfifo(struct mkfifo_args *uap) 2180 { 2181 struct nlookupdata nd; 2182 int error; 2183 2184 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2185 if (error == 0) 2186 error = kern_mkfifo(&nd, uap->mode); 2187 nlookup_done(&nd); 2188 return (error); 2189 } 2190 2191 /* 2192 * mkfifoat_args(int fd, char *path, mode_t mode) 2193 * 2194 * Create a named pipe. The path is relative to the directory associated 2195 * with fd. 2196 */ 2197 int 2198 sys_mkfifoat(struct mkfifoat_args *uap) 2199 { 2200 struct nlookupdata nd; 2201 struct file *fp; 2202 int error; 2203 2204 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2205 if (error == 0) 2206 error = kern_mkfifo(&nd, uap->mode); 2207 nlookup_done_at(&nd, fp); 2208 return (error); 2209 } 2210 2211 static int hardlink_check_uid = 0; 2212 SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW, 2213 &hardlink_check_uid, 0, 2214 "Unprivileged processes cannot create hard links to files owned by other " 2215 "users"); 2216 static int hardlink_check_gid = 0; 2217 SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW, 2218 &hardlink_check_gid, 0, 2219 "Unprivileged processes cannot create hard links to files owned by other " 2220 "groups"); 2221 2222 static int 2223 can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred) 2224 { 2225 struct vattr va; 2226 int error; 2227 2228 /* 2229 * Shortcut if disabled 2230 */ 2231 if (hardlink_check_uid == 0 && hardlink_check_gid == 0) 2232 return (0); 2233 2234 /* 2235 * Privileged user can always hardlink 2236 */ 2237 if (priv_check_cred(cred, PRIV_VFS_LINK, 0) == 0) 2238 return (0); 2239 2240 /* 2241 * Otherwise only if the originating file is owned by the 2242 * same user or group. 
Note that any group is allowed if 2243 * the file is owned by the caller. 2244 */ 2245 error = VOP_GETATTR(vp, &va); 2246 if (error != 0) 2247 return (error); 2248 2249 if (hardlink_check_uid) { 2250 if (cred->cr_uid != va.va_uid) 2251 return (EPERM); 2252 } 2253 2254 if (hardlink_check_gid) { 2255 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred)) 2256 return (EPERM); 2257 } 2258 2259 return (0); 2260 } 2261 2262 int 2263 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd) 2264 { 2265 struct thread *td = curthread; 2266 struct vnode *vp; 2267 int error; 2268 2269 /* 2270 * Lookup the source and obtained a locked vnode. 2271 * 2272 * You may only hardlink a file which you have write permission 2273 * on or which you own. 2274 * 2275 * XXX relookup on vget failure / race ? 2276 */ 2277 bwillinode(1); 2278 nd->nl_flags |= NLC_WRITE | NLC_OWN | NLC_HLINK; 2279 if ((error = nlookup(nd)) != 0) 2280 return (error); 2281 vp = nd->nl_nch.ncp->nc_vp; 2282 KKASSERT(vp != NULL); 2283 if (vp->v_type == VDIR) 2284 return (EPERM); /* POSIX */ 2285 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2286 return (error); 2287 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) 2288 return (error); 2289 2290 /* 2291 * Unlock the source so we can lookup the target without deadlocking 2292 * (XXX vp is locked already, possible other deadlock?). The target 2293 * must not exist. 2294 */ 2295 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED); 2296 nd->nl_flags &= ~NLC_NCPISLOCKED; 2297 cache_unlock(&nd->nl_nch); 2298 vn_unlock(vp); 2299 2300 linknd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2301 if ((error = nlookup(linknd)) != 0) { 2302 vrele(vp); 2303 return (error); 2304 } 2305 if (linknd->nl_nch.ncp->nc_vp) { 2306 vrele(vp); 2307 return (EEXIST); 2308 } 2309 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 2310 vrele(vp); 2311 return (error); 2312 } 2313 2314 /* 2315 * Finally run the new API VOP. 2316 */ 2317 error = can_hardlink(vp, td, td->td_ucred); 2318 if (error == 0) { 2319 error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp, 2320 vp, linknd->nl_cred); 2321 } 2322 vput(vp); 2323 return (error); 2324 } 2325 2326 /* 2327 * link_args(char *path, char *link) 2328 * 2329 * Make a hard file link. 2330 */ 2331 int 2332 sys_link(struct link_args *uap) 2333 { 2334 struct nlookupdata nd, linknd; 2335 int error; 2336 2337 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2338 if (error == 0) { 2339 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0); 2340 if (error == 0) 2341 error = kern_link(&nd, &linknd); 2342 nlookup_done(&linknd); 2343 } 2344 nlookup_done(&nd); 2345 return (error); 2346 } 2347 2348 /* 2349 * linkat_args(int fd1, char *path1, int fd2, char *path2, int flags) 2350 * 2351 * Make a hard file link. The path1 argument is relative to the directory 2352 * associated with fd1, and similarly the path2 argument is relative to 2353 * the directory associated with fd2. 2354 */ 2355 int 2356 sys_linkat(struct linkat_args *uap) 2357 { 2358 struct nlookupdata nd, linknd; 2359 struct file *fp1, *fp2; 2360 int error; 2361 2362 error = nlookup_init_at(&nd, &fp1, uap->fd1, uap->path1, UIO_USERSPACE, 2363 (uap->flags & AT_SYMLINK_FOLLOW) ? 
NLC_FOLLOW : 0); 2364 if (error == 0) { 2365 error = nlookup_init_at(&linknd, &fp2, uap->fd2, 2366 uap->path2, UIO_USERSPACE, 0); 2367 if (error == 0) 2368 error = kern_link(&nd, &linknd); 2369 nlookup_done_at(&linknd, fp2); 2370 } 2371 nlookup_done_at(&nd, fp1); 2372 return (error); 2373 } 2374 2375 int 2376 kern_symlink(struct nlookupdata *nd, char *path, int mode) 2377 { 2378 struct vattr vattr; 2379 struct vnode *vp; 2380 struct vnode *dvp; 2381 int error; 2382 2383 bwillinode(1); 2384 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2385 if ((error = nlookup(nd)) != 0) 2386 return (error); 2387 if (nd->nl_nch.ncp->nc_vp) 2388 return (EEXIST); 2389 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2390 return (error); 2391 dvp = nd->nl_dvp; 2392 VATTR_NULL(&vattr); 2393 vattr.va_mode = mode; 2394 error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path); 2395 if (error == 0) 2396 vput(vp); 2397 return (error); 2398 } 2399 2400 /* 2401 * symlink(char *path, char *link) 2402 * 2403 * Make a symbolic link. 2404 */ 2405 int 2406 sys_symlink(struct symlink_args *uap) 2407 { 2408 struct thread *td = curthread; 2409 struct nlookupdata nd; 2410 char *path; 2411 int error; 2412 int mode; 2413 2414 path = objcache_get(namei_oc, M_WAITOK); 2415 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 2416 if (error == 0) { 2417 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0); 2418 if (error == 0) { 2419 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2420 error = kern_symlink(&nd, path, mode); 2421 } 2422 nlookup_done(&nd); 2423 } 2424 objcache_put(namei_oc, path); 2425 return (error); 2426 } 2427 2428 /* 2429 * symlinkat_args(char *path1, int fd, char *path2) 2430 * 2431 * Make a symbolic link. The path2 argument is relative to the directory 2432 * associated with fd. 2433 */ 2434 int 2435 sys_symlinkat(struct symlinkat_args *uap) 2436 { 2437 struct thread *td = curthread; 2438 struct nlookupdata nd; 2439 struct file *fp; 2440 char *path1; 2441 int error; 2442 int mode; 2443 2444 path1 = objcache_get(namei_oc, M_WAITOK); 2445 error = copyinstr(uap->path1, path1, MAXPATHLEN, NULL); 2446 if (error == 0) { 2447 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path2, 2448 UIO_USERSPACE, 0); 2449 if (error == 0) { 2450 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2451 error = kern_symlink(&nd, path1, mode); 2452 } 2453 nlookup_done_at(&nd, fp); 2454 } 2455 objcache_put(namei_oc, path1); 2456 return (error); 2457 } 2458 2459 /* 2460 * undelete_args(char *path) 2461 * 2462 * Delete a whiteout from the filesystem. 2463 */ 2464 int 2465 sys_undelete(struct undelete_args *uap) 2466 { 2467 struct nlookupdata nd; 2468 int error; 2469 2470 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2471 bwillinode(1); 2472 nd.nl_flags |= NLC_DELETE | NLC_REFDVP; 2473 if (error == 0) 2474 error = nlookup(&nd); 2475 if (error == 0) 2476 error = ncp_writechk(&nd.nl_nch); 2477 if (error == 0) { 2478 error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred, 2479 NAMEI_DELETE); 2480 } 2481 nlookup_done(&nd); 2482 return (error); 2483 } 2484 2485 int 2486 kern_unlink(struct nlookupdata *nd) 2487 { 2488 int error; 2489 2490 bwillinode(1); 2491 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 2492 if ((error = nlookup(nd)) != 0) 2493 return (error); 2494 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2495 return (error); 2496 error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 2497 return (error); 2498 } 2499 2500 /* 2501 * unlink_args(char *path) 2502 * 2503 * Delete a name from the filesystem. 
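 *
 * The heavy lifting is done by kern_unlink() above: the path is looked
 * up with NLC_DELETE | NLC_REFDVP, the mount is checked for writability
 * via ncp_writechk(), and the entry is removed with VOP_NREMOVE().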
2504 */ 2505 int 2506 sys_unlink(struct unlink_args *uap) 2507 { 2508 struct nlookupdata nd; 2509 int error; 2510 2511 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2512 if (error == 0) 2513 error = kern_unlink(&nd); 2514 nlookup_done(&nd); 2515 return (error); 2516 } 2517 2518 2519 /* 2520 * unlinkat_args(int fd, char *path, int flags) 2521 * 2522 * Delete the file or directory entry pointed to by fd/path. 2523 */ 2524 int 2525 sys_unlinkat(struct unlinkat_args *uap) 2526 { 2527 struct nlookupdata nd; 2528 struct file *fp; 2529 int error; 2530 2531 if (uap->flags & ~AT_REMOVEDIR) 2532 return (EINVAL); 2533 2534 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2535 if (error == 0) { 2536 if (uap->flags & AT_REMOVEDIR) 2537 error = kern_rmdir(&nd); 2538 else 2539 error = kern_unlink(&nd); 2540 } 2541 nlookup_done_at(&nd, fp); 2542 return (error); 2543 } 2544 2545 int 2546 kern_lseek(int fd, off_t offset, int whence, off_t *res) 2547 { 2548 struct thread *td = curthread; 2549 struct proc *p = td->td_proc; 2550 struct file *fp; 2551 struct vnode *vp; 2552 struct vattr vattr; 2553 off_t new_offset; 2554 int error; 2555 2556 fp = holdfp(p->p_fd, fd, -1); 2557 if (fp == NULL) 2558 return (EBADF); 2559 if (fp->f_type != DTYPE_VNODE) { 2560 error = ESPIPE; 2561 goto done; 2562 } 2563 vp = (struct vnode *)fp->f_data; 2564 2565 switch (whence) { 2566 case L_INCR: 2567 spin_lock(&fp->f_spin); 2568 new_offset = fp->f_offset + offset; 2569 error = 0; 2570 break; 2571 case L_XTND: 2572 error = VOP_GETATTR(vp, &vattr); 2573 spin_lock(&fp->f_spin); 2574 new_offset = offset + vattr.va_size; 2575 break; 2576 case L_SET: 2577 new_offset = offset; 2578 error = 0; 2579 spin_lock(&fp->f_spin); 2580 break; 2581 default: 2582 new_offset = 0; 2583 error = EINVAL; 2584 spin_lock(&fp->f_spin); 2585 break; 2586 } 2587 2588 /* 2589 * Validate the seek position. Negative offsets are not allowed 2590 * for regular files or directories. 2591 * 2592 * Normally we would also not want to allow negative offsets for 2593 * character and block-special devices. However kvm addresses 2594 * on 64 bit architectures might appear to be negative and must 2595 * be allowed. 2596 */ 2597 if (error == 0) { 2598 if (new_offset < 0 && 2599 (vp->v_type == VREG || vp->v_type == VDIR)) { 2600 error = EINVAL; 2601 } else { 2602 fp->f_offset = new_offset; 2603 } 2604 } 2605 *res = fp->f_offset; 2606 spin_unlock(&fp->f_spin); 2607 done: 2608 fdrop(fp); 2609 return (error); 2610 } 2611 2612 /* 2613 * lseek_args(int fd, int pad, off_t offset, int whence) 2614 * 2615 * Reposition read/write file offset. 2616 */ 2617 int 2618 sys_lseek(struct lseek_args *uap) 2619 { 2620 int error; 2621 2622 error = kern_lseek(uap->fd, uap->offset, uap->whence, 2623 &uap->sysmsg_offset); 2624 2625 return (error); 2626 } 2627 2628 /* 2629 * Check if current process can access given file. amode is a bitmask of *_OK 2630 * access bits. flags is a bitmask of AT_* flags. 2631 */ 2632 int 2633 kern_access(struct nlookupdata *nd, int amode, int flags) 2634 { 2635 struct vnode *vp; 2636 int error, mode; 2637 2638 if (flags & ~AT_EACCESS) 2639 return (EINVAL); 2640 if ((error = nlookup(nd)) != 0) 2641 return (error); 2642 retry: 2643 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2644 if (error) 2645 return (error); 2646 2647 /* Flags == 0 means only check for existence. 
*/ 2648 if (amode) { 2649 mode = 0; 2650 if (amode & R_OK) 2651 mode |= VREAD; 2652 if (amode & W_OK) 2653 mode |= VWRITE; 2654 if (amode & X_OK) 2655 mode |= VEXEC; 2656 if ((mode & VWRITE) == 0 || 2657 (error = vn_writechk(vp, &nd->nl_nch)) == 0) 2658 error = VOP_ACCESS_FLAGS(vp, mode, flags, nd->nl_cred); 2659 2660 /* 2661 * If the file handle is stale we have to re-resolve the 2662 * entry. This is a hack at the moment. 2663 */ 2664 if (error == ESTALE) { 2665 vput(vp); 2666 cache_setunresolved(&nd->nl_nch); 2667 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2668 if (error == 0) { 2669 vp = NULL; 2670 goto retry; 2671 } 2672 return(error); 2673 } 2674 } 2675 vput(vp); 2676 return (error); 2677 } 2678 2679 /* 2680 * access_args(char *path, int flags) 2681 * 2682 * Check access permissions. 2683 */ 2684 int 2685 sys_access(struct access_args *uap) 2686 { 2687 struct nlookupdata nd; 2688 int error; 2689 2690 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2691 if (error == 0) 2692 error = kern_access(&nd, uap->flags, 0); 2693 nlookup_done(&nd); 2694 return (error); 2695 } 2696 2697 2698 /* 2699 * eaccess_args(char *path, int flags) 2700 * 2701 * Check access permissions. 2702 */ 2703 int 2704 sys_eaccess(struct eaccess_args *uap) 2705 { 2706 struct nlookupdata nd; 2707 int error; 2708 2709 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2710 if (error == 0) 2711 error = kern_access(&nd, uap->flags, AT_EACCESS); 2712 nlookup_done(&nd); 2713 return (error); 2714 } 2715 2716 2717 /* 2718 * faccessat_args(int fd, char *path, int amode, int flags) 2719 * 2720 * Check access permissions. 2721 */ 2722 int 2723 sys_faccessat(struct faccessat_args *uap) 2724 { 2725 struct nlookupdata nd; 2726 struct file *fp; 2727 int error; 2728 2729 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 2730 NLC_FOLLOW); 2731 if (error == 0) 2732 error = kern_access(&nd, uap->amode, uap->flags); 2733 nlookup_done_at(&nd, fp); 2734 return (error); 2735 } 2736 2737 2738 int 2739 kern_stat(struct nlookupdata *nd, struct stat *st) 2740 { 2741 int error; 2742 struct vnode *vp; 2743 2744 if ((error = nlookup(nd)) != 0) 2745 return (error); 2746 again: 2747 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 2748 return (ENOENT); 2749 2750 if ((error = vget(vp, LK_SHARED)) != 0) 2751 return (error); 2752 error = vn_stat(vp, st, nd->nl_cred); 2753 2754 /* 2755 * If the file handle is stale we have to re-resolve the entry. This 2756 * is a hack at the moment. 2757 */ 2758 if (error == ESTALE) { 2759 vput(vp); 2760 cache_setunresolved(&nd->nl_nch); 2761 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2762 if (error == 0) 2763 goto again; 2764 } else { 2765 vput(vp); 2766 } 2767 return (error); 2768 } 2769 2770 /* 2771 * stat_args(char *path, struct stat *ub) 2772 * 2773 * Get file status; this version follows links. 2774 */ 2775 int 2776 sys_stat(struct stat_args *uap) 2777 { 2778 struct nlookupdata nd; 2779 struct stat st; 2780 int error; 2781 2782 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2783 if (error == 0) { 2784 error = kern_stat(&nd, &st); 2785 if (error == 0) 2786 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2787 } 2788 nlookup_done(&nd); 2789 return (error); 2790 } 2791 2792 /* 2793 * lstat_args(char *path, struct stat *ub) 2794 * 2795 * Get file status; this version does not follow links. 
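 *
 * The only difference from sys_stat() is that the path is looked up
 * without NLC_FOLLOW, so a trailing symbolic link is stat'd itself
 * rather than the object it points to.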
2796 */ 2797 int 2798 sys_lstat(struct lstat_args *uap) 2799 { 2800 struct nlookupdata nd; 2801 struct stat st; 2802 int error; 2803 2804 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2805 if (error == 0) { 2806 error = kern_stat(&nd, &st); 2807 if (error == 0) 2808 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2809 } 2810 nlookup_done(&nd); 2811 return (error); 2812 } 2813 2814 /* 2815 * fstatat_args(int fd, char *path, struct stat *sb, int flags) 2816 * 2817 * Get status of file pointed to by fd/path. 2818 */ 2819 int 2820 sys_fstatat(struct fstatat_args *uap) 2821 { 2822 struct nlookupdata nd; 2823 struct stat st; 2824 int error; 2825 int flags; 2826 struct file *fp; 2827 2828 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 2829 return (EINVAL); 2830 2831 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 2832 2833 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 2834 UIO_USERSPACE, flags); 2835 if (error == 0) { 2836 error = kern_stat(&nd, &st); 2837 if (error == 0) 2838 error = copyout(&st, uap->sb, sizeof(*uap->sb)); 2839 } 2840 nlookup_done_at(&nd, fp); 2841 return (error); 2842 } 2843 2844 static int 2845 kern_pathconf(char *path, int name, int flags, register_t *sysmsg_regp) 2846 { 2847 struct nlookupdata nd; 2848 struct vnode *vp; 2849 int error; 2850 2851 vp = NULL; 2852 error = nlookup_init(&nd, path, UIO_USERSPACE, flags); 2853 if (error == 0) 2854 error = nlookup(&nd); 2855 if (error == 0) 2856 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 2857 nlookup_done(&nd); 2858 if (error == 0) { 2859 error = VOP_PATHCONF(vp, name, sysmsg_regp); 2860 vput(vp); 2861 } 2862 return (error); 2863 } 2864 2865 /* 2866 * pathconf_Args(char *path, int name) 2867 * 2868 * Get configurable pathname variables. 2869 */ 2870 int 2871 sys_pathconf(struct pathconf_args *uap) 2872 { 2873 return (kern_pathconf(uap->path, uap->name, NLC_FOLLOW, 2874 &uap->sysmsg_reg)); 2875 } 2876 2877 /* 2878 * lpathconf_Args(char *path, int name) 2879 * 2880 * Get configurable pathname variables, but don't follow symlinks. 2881 */ 2882 int 2883 sys_lpathconf(struct lpathconf_args *uap) 2884 { 2885 return (kern_pathconf(uap->path, uap->name, 0, &uap->sysmsg_reg)); 2886 } 2887 2888 /* 2889 * XXX: daver 2890 * kern_readlink isn't properly split yet. There is a copyin burried 2891 * in VOP_READLINK(). 2892 */ 2893 int 2894 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res) 2895 { 2896 struct thread *td = curthread; 2897 struct vnode *vp; 2898 struct iovec aiov; 2899 struct uio auio; 2900 int error; 2901 2902 if ((error = nlookup(nd)) != 0) 2903 return (error); 2904 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2905 if (error) 2906 return (error); 2907 if (vp->v_type != VLNK) { 2908 error = EINVAL; 2909 } else { 2910 aiov.iov_base = buf; 2911 aiov.iov_len = count; 2912 auio.uio_iov = &aiov; 2913 auio.uio_iovcnt = 1; 2914 auio.uio_offset = 0; 2915 auio.uio_rw = UIO_READ; 2916 auio.uio_segflg = UIO_USERSPACE; 2917 auio.uio_td = td; 2918 auio.uio_resid = count; 2919 error = VOP_READLINK(vp, &auio, td->td_ucred); 2920 } 2921 vput(vp); 2922 *res = count - auio.uio_resid; 2923 return (error); 2924 } 2925 2926 /* 2927 * readlink_args(char *path, char *buf, int count) 2928 * 2929 * Return target name of a symbolic link. 
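 *
 * On success the result is the number of bytes placed in the buffer
 * (count - auio.uio_resid in kern_readlink() above); as with
 * readlink(2), the returned bytes are not NUL-terminated.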
2930 */ 2931 int 2932 sys_readlink(struct readlink_args *uap) 2933 { 2934 struct nlookupdata nd; 2935 int error; 2936 2937 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2938 if (error == 0) { 2939 error = kern_readlink(&nd, uap->buf, uap->count, 2940 &uap->sysmsg_result); 2941 } 2942 nlookup_done(&nd); 2943 return (error); 2944 } 2945 2946 /* 2947 * readlinkat_args(int fd, char *path, char *buf, size_t bufsize) 2948 * 2949 * Return target name of a symbolic link. The path is relative to the 2950 * directory associated with fd. 2951 */ 2952 int 2953 sys_readlinkat(struct readlinkat_args *uap) 2954 { 2955 struct nlookupdata nd; 2956 struct file *fp; 2957 int error; 2958 2959 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2960 if (error == 0) { 2961 error = kern_readlink(&nd, uap->buf, uap->bufsize, 2962 &uap->sysmsg_result); 2963 } 2964 nlookup_done_at(&nd, fp); 2965 return (error); 2966 } 2967 2968 static int 2969 setfflags(struct vnode *vp, int flags) 2970 { 2971 struct thread *td = curthread; 2972 int error; 2973 struct vattr vattr; 2974 2975 /* 2976 * Prevent non-root users from setting flags on devices. When 2977 * a device is reused, users can retain ownership of the device 2978 * if they are allowed to set flags and programs assume that 2979 * chown can't fail when done as root. 2980 */ 2981 if ((vp->v_type == VCHR || vp->v_type == VBLK) && 2982 ((error = priv_check_cred(td->td_ucred, PRIV_VFS_CHFLAGS_DEV, 0)) != 0)) 2983 return (error); 2984 2985 /* 2986 * note: vget is required for any operation that might mod the vnode 2987 * so VINACTIVE is properly cleared. 2988 */ 2989 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2990 VATTR_NULL(&vattr); 2991 vattr.va_flags = flags; 2992 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2993 vput(vp); 2994 } 2995 return (error); 2996 } 2997 2998 /* 2999 * chflags(char *path, int flags) 3000 * 3001 * Change flags of a file given a path name. 3002 */ 3003 int 3004 sys_chflags(struct chflags_args *uap) 3005 { 3006 struct nlookupdata nd; 3007 struct vnode *vp; 3008 int error; 3009 3010 vp = NULL; 3011 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3012 if (error == 0) 3013 error = nlookup(&nd); 3014 if (error == 0) 3015 error = ncp_writechk(&nd.nl_nch); 3016 if (error == 0) 3017 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 3018 nlookup_done(&nd); 3019 if (error == 0) { 3020 error = setfflags(vp, uap->flags); 3021 vrele(vp); 3022 } 3023 return (error); 3024 } 3025 3026 /* 3027 * lchflags(char *path, int flags) 3028 * 3029 * Change flags of a file given a path name, but don't follow symlinks. 3030 */ 3031 int 3032 sys_lchflags(struct lchflags_args *uap) 3033 { 3034 struct nlookupdata nd; 3035 struct vnode *vp; 3036 int error; 3037 3038 vp = NULL; 3039 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3040 if (error == 0) 3041 error = nlookup(&nd); 3042 if (error == 0) 3043 error = ncp_writechk(&nd.nl_nch); 3044 if (error == 0) 3045 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 3046 nlookup_done(&nd); 3047 if (error == 0) { 3048 error = setfflags(vp, uap->flags); 3049 vrele(vp); 3050 } 3051 return (error); 3052 } 3053 3054 /* 3055 * fchflags_args(int fd, int flags) 3056 * 3057 * Change flags of a file given a file descriptor. 
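 *
 * Unlike chflags()/lchflags() above, no path lookup is performed; the
 * vnode is taken directly from the descriptor and the same setfflags()
 * helper is used to apply the new flags.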
3058 */ 3059 int 3060 sys_fchflags(struct fchflags_args *uap) 3061 { 3062 struct thread *td = curthread; 3063 struct proc *p = td->td_proc; 3064 struct file *fp; 3065 int error; 3066 3067 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3068 return (error); 3069 if (fp->f_nchandle.ncp) 3070 error = ncp_writechk(&fp->f_nchandle); 3071 if (error == 0) 3072 error = setfflags((struct vnode *) fp->f_data, uap->flags); 3073 fdrop(fp); 3074 return (error); 3075 } 3076 3077 static int 3078 setfmode(struct vnode *vp, int mode) 3079 { 3080 struct thread *td = curthread; 3081 int error; 3082 struct vattr vattr; 3083 3084 /* 3085 * note: vget is required for any operation that might mod the vnode 3086 * so VINACTIVE is properly cleared. 3087 */ 3088 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3089 VATTR_NULL(&vattr); 3090 vattr.va_mode = mode & ALLPERMS; 3091 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3092 vput(vp); 3093 } 3094 return error; 3095 } 3096 3097 int 3098 kern_chmod(struct nlookupdata *nd, int mode) 3099 { 3100 struct vnode *vp; 3101 int error; 3102 3103 if ((error = nlookup(nd)) != 0) 3104 return (error); 3105 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3106 return (error); 3107 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3108 error = setfmode(vp, mode); 3109 vrele(vp); 3110 return (error); 3111 } 3112 3113 /* 3114 * chmod_args(char *path, int mode) 3115 * 3116 * Change mode of a file given path name. 3117 */ 3118 int 3119 sys_chmod(struct chmod_args *uap) 3120 { 3121 struct nlookupdata nd; 3122 int error; 3123 3124 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3125 if (error == 0) 3126 error = kern_chmod(&nd, uap->mode); 3127 nlookup_done(&nd); 3128 return (error); 3129 } 3130 3131 /* 3132 * lchmod_args(char *path, int mode) 3133 * 3134 * Change mode of a file given path name (don't follow links.) 3135 */ 3136 int 3137 sys_lchmod(struct lchmod_args *uap) 3138 { 3139 struct nlookupdata nd; 3140 int error; 3141 3142 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3143 if (error == 0) 3144 error = kern_chmod(&nd, uap->mode); 3145 nlookup_done(&nd); 3146 return (error); 3147 } 3148 3149 /* 3150 * fchmod_args(int fd, int mode) 3151 * 3152 * Change mode of a file given a file descriptor. 3153 */ 3154 int 3155 sys_fchmod(struct fchmod_args *uap) 3156 { 3157 struct thread *td = curthread; 3158 struct proc *p = td->td_proc; 3159 struct file *fp; 3160 int error; 3161 3162 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3163 return (error); 3164 if (fp->f_nchandle.ncp) 3165 error = ncp_writechk(&fp->f_nchandle); 3166 if (error == 0) 3167 error = setfmode((struct vnode *)fp->f_data, uap->mode); 3168 fdrop(fp); 3169 return (error); 3170 } 3171 3172 /* 3173 * fchmodat_args(char *path, int mode) 3174 * 3175 * Change mode of a file pointed to by fd/path. 3176 */ 3177 int 3178 sys_fchmodat(struct fchmodat_args *uap) 3179 { 3180 struct nlookupdata nd; 3181 struct file *fp; 3182 int error; 3183 int flags; 3184 3185 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3186 return (EINVAL); 3187 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 
0 : NLC_FOLLOW; 3188 3189 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3190 UIO_USERSPACE, flags); 3191 if (error == 0) 3192 error = kern_chmod(&nd, uap->mode); 3193 nlookup_done_at(&nd, fp); 3194 return (error); 3195 } 3196 3197 static int 3198 setfown(struct mount *mp, struct vnode *vp, uid_t uid, gid_t gid) 3199 { 3200 struct thread *td = curthread; 3201 int error; 3202 struct vattr vattr; 3203 uid_t o_uid; 3204 gid_t o_gid; 3205 uint64_t size; 3206 3207 /* 3208 * note: vget is required for any operation that might mod the vnode 3209 * so VINACTIVE is properly cleared. 3210 */ 3211 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3212 if ((error = VOP_GETATTR(vp, &vattr)) != 0) 3213 return error; 3214 o_uid = vattr.va_uid; 3215 o_gid = vattr.va_gid; 3216 size = vattr.va_size; 3217 3218 VATTR_NULL(&vattr); 3219 vattr.va_uid = uid; 3220 vattr.va_gid = gid; 3221 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3222 vput(vp); 3223 } 3224 3225 if (error == 0) { 3226 if (uid == -1) 3227 uid = o_uid; 3228 if (gid == -1) 3229 gid = o_gid; 3230 VFS_ACCOUNT(mp, o_uid, o_gid, -size); 3231 VFS_ACCOUNT(mp, uid, gid, size); 3232 } 3233 3234 return error; 3235 } 3236 3237 int 3238 kern_chown(struct nlookupdata *nd, int uid, int gid) 3239 { 3240 struct vnode *vp; 3241 int error; 3242 3243 if ((error = nlookup(nd)) != 0) 3244 return (error); 3245 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3246 return (error); 3247 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3248 error = setfown(nd->nl_nch.mount, vp, uid, gid); 3249 vrele(vp); 3250 return (error); 3251 } 3252 3253 /* 3254 * chown(char *path, int uid, int gid) 3255 * 3256 * Set ownership given a path name. 3257 */ 3258 int 3259 sys_chown(struct chown_args *uap) 3260 { 3261 struct nlookupdata nd; 3262 int error; 3263 3264 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3265 if (error == 0) 3266 error = kern_chown(&nd, uap->uid, uap->gid); 3267 nlookup_done(&nd); 3268 return (error); 3269 } 3270 3271 /* 3272 * lchown_args(char *path, int uid, int gid) 3273 * 3274 * Set ownership given a path name, do not cross symlinks. 3275 */ 3276 int 3277 sys_lchown(struct lchown_args *uap) 3278 { 3279 struct nlookupdata nd; 3280 int error; 3281 3282 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3283 if (error == 0) 3284 error = kern_chown(&nd, uap->uid, uap->gid); 3285 nlookup_done(&nd); 3286 return (error); 3287 } 3288 3289 /* 3290 * fchown_args(int fd, int uid, int gid) 3291 * 3292 * Set ownership given a file descriptor. 3293 */ 3294 int 3295 sys_fchown(struct fchown_args *uap) 3296 { 3297 struct thread *td = curthread; 3298 struct proc *p = td->td_proc; 3299 struct file *fp; 3300 int error; 3301 3302 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3303 return (error); 3304 if (fp->f_nchandle.ncp) 3305 error = ncp_writechk(&fp->f_nchandle); 3306 if (error == 0) 3307 error = setfown(p->p_fd->fd_ncdir.mount, 3308 (struct vnode *)fp->f_data, uap->uid, uap->gid); 3309 fdrop(fp); 3310 return (error); 3311 } 3312 3313 /* 3314 * fchownat(int fd, char *path, int uid, int gid, int flags) 3315 * 3316 * Set ownership of file pointed to by fd/path. 3317 */ 3318 int 3319 sys_fchownat(struct fchownat_args *uap) 3320 { 3321 struct nlookupdata nd; 3322 struct file *fp; 3323 int error; 3324 int flags; 3325 3326 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3327 return (EINVAL); 3328 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 
0 : NLC_FOLLOW; 3329 3330 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3331 UIO_USERSPACE, flags); 3332 if (error == 0) 3333 error = kern_chown(&nd, uap->uid, uap->gid); 3334 nlookup_done_at(&nd, fp); 3335 return (error); 3336 } 3337 3338 3339 static int 3340 getutimes(const struct timeval *tvp, struct timespec *tsp) 3341 { 3342 struct timeval tv[2]; 3343 3344 if (tvp == NULL) { 3345 microtime(&tv[0]); 3346 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); 3347 tsp[1] = tsp[0]; 3348 } else { 3349 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]); 3350 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]); 3351 } 3352 return 0; 3353 } 3354 3355 static int 3356 setutimes(struct vnode *vp, struct vattr *vattr, 3357 const struct timespec *ts, int nullflag) 3358 { 3359 struct thread *td = curthread; 3360 int error; 3361 3362 VATTR_NULL(vattr); 3363 vattr->va_atime = ts[0]; 3364 vattr->va_mtime = ts[1]; 3365 if (nullflag) 3366 vattr->va_vaflags |= VA_UTIMES_NULL; 3367 error = VOP_SETATTR(vp, vattr, td->td_ucred); 3368 3369 return error; 3370 } 3371 3372 int 3373 kern_utimes(struct nlookupdata *nd, struct timeval *tptr) 3374 { 3375 struct timespec ts[2]; 3376 struct vnode *vp; 3377 struct vattr vattr; 3378 int error; 3379 3380 if ((error = getutimes(tptr, ts)) != 0) 3381 return (error); 3382 3383 /* 3384 * NOTE: utimes() succeeds for the owner even if the file 3385 * is not user-writable. 3386 */ 3387 nd->nl_flags |= NLC_OWN | NLC_WRITE; 3388 3389 if ((error = nlookup(nd)) != 0) 3390 return (error); 3391 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3392 return (error); 3393 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3394 return (error); 3395 3396 /* 3397 * note: vget is required for any operation that might mod the vnode 3398 * so VINACTIVE is properly cleared. 3399 */ 3400 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3401 error = vget(vp, LK_EXCLUSIVE); 3402 if (error == 0) { 3403 error = setutimes(vp, &vattr, ts, (tptr == NULL)); 3404 vput(vp); 3405 } 3406 } 3407 vrele(vp); 3408 return (error); 3409 } 3410 3411 /* 3412 * utimes_args(char *path, struct timeval *tptr) 3413 * 3414 * Set the access and modification times of a file. 3415 */ 3416 int 3417 sys_utimes(struct utimes_args *uap) 3418 { 3419 struct timeval tv[2]; 3420 struct nlookupdata nd; 3421 int error; 3422 3423 if (uap->tptr) { 3424 error = copyin(uap->tptr, tv, sizeof(tv)); 3425 if (error) 3426 return (error); 3427 } 3428 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3429 if (error == 0) 3430 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3431 nlookup_done(&nd); 3432 return (error); 3433 } 3434 3435 /* 3436 * lutimes_args(char *path, struct timeval *tptr) 3437 * 3438 * Set the access and modification times of a file. 3439 */ 3440 int 3441 sys_lutimes(struct lutimes_args *uap) 3442 { 3443 struct timeval tv[2]; 3444 struct nlookupdata nd; 3445 int error; 3446 3447 if (uap->tptr) { 3448 error = copyin(uap->tptr, tv, sizeof(tv)); 3449 if (error) 3450 return (error); 3451 } 3452 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3453 if (error == 0) 3454 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3455 nlookup_done(&nd); 3456 return (error); 3457 } 3458 3459 /* 3460 * Set utimes on a file descriptor. The creds used to open the 3461 * file are used to determine whether the operation is allowed 3462 * or not. 
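 *
 * The permission check is made against the file's attributes with
 * naccess_va(NLC_OWN | NLC_WRITE) using fp->f_cred, mirroring the
 * owner-or-writable rule applied by the path-based kern_utimes().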
3463 */ 3464 int 3465 kern_futimes(int fd, struct timeval *tptr) 3466 { 3467 struct thread *td = curthread; 3468 struct proc *p = td->td_proc; 3469 struct timespec ts[2]; 3470 struct file *fp; 3471 struct vnode *vp; 3472 struct vattr vattr; 3473 int error; 3474 3475 error = getutimes(tptr, ts); 3476 if (error) 3477 return (error); 3478 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3479 return (error); 3480 if (fp->f_nchandle.ncp) 3481 error = ncp_writechk(&fp->f_nchandle); 3482 if (error == 0) { 3483 vp = fp->f_data; 3484 error = vget(vp, LK_EXCLUSIVE); 3485 if (error == 0) { 3486 error = VOP_GETATTR(vp, &vattr); 3487 if (error == 0) { 3488 error = naccess_va(&vattr, NLC_OWN | NLC_WRITE, 3489 fp->f_cred); 3490 } 3491 if (error == 0) { 3492 error = setutimes(vp, &vattr, ts, 3493 (tptr == NULL)); 3494 } 3495 vput(vp); 3496 } 3497 } 3498 fdrop(fp); 3499 return (error); 3500 } 3501 3502 /* 3503 * futimes_args(int fd, struct timeval *tptr) 3504 * 3505 * Set the access and modification times of a file. 3506 */ 3507 int 3508 sys_futimes(struct futimes_args *uap) 3509 { 3510 struct timeval tv[2]; 3511 int error; 3512 3513 if (uap->tptr) { 3514 error = copyin(uap->tptr, tv, sizeof(tv)); 3515 if (error) 3516 return (error); 3517 } 3518 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL); 3519 3520 return (error); 3521 } 3522 3523 int 3524 kern_truncate(struct nlookupdata *nd, off_t length) 3525 { 3526 struct vnode *vp; 3527 struct vattr vattr; 3528 int error; 3529 uid_t uid = 0; 3530 gid_t gid = 0; 3531 uint64_t old_size = 0; 3532 3533 if (length < 0) 3534 return(EINVAL); 3535 nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE; 3536 if ((error = nlookup(nd)) != 0) 3537 return (error); 3538 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3539 return (error); 3540 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3541 return (error); 3542 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 3543 vrele(vp); 3544 return (error); 3545 } 3546 if (vp->v_type == VDIR) { 3547 error = EISDIR; 3548 goto done; 3549 } 3550 if (vfs_quota_enabled) { 3551 error = VOP_GETATTR(vp, &vattr); 3552 KASSERT(error == 0, ("kern_truncate(): VOP_GETATTR didn't return 0")); 3553 uid = vattr.va_uid; 3554 gid = vattr.va_gid; 3555 old_size = vattr.va_size; 3556 } 3557 3558 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3559 VATTR_NULL(&vattr); 3560 vattr.va_size = length; 3561 error = VOP_SETATTR(vp, &vattr, nd->nl_cred); 3562 VFS_ACCOUNT(nd->nl_nch.mount, uid, gid, length - old_size); 3563 } 3564 done: 3565 vput(vp); 3566 return (error); 3567 } 3568 3569 /* 3570 * truncate(char *path, int pad, off_t length) 3571 * 3572 * Truncate a file given its path name. 
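 *
 * Illustrative userland usage only (not part of this file):
 *
 *	truncate("/tmp/file", (off_t)0);
 *
 * Negative lengths are rejected with EINVAL and directories with
 * EISDIR by kern_truncate() above.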
3573 */ 3574 int 3575 sys_truncate(struct truncate_args *uap) 3576 { 3577 struct nlookupdata nd; 3578 int error; 3579 3580 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3581 if (error == 0) 3582 error = kern_truncate(&nd, uap->length); 3583 nlookup_done(&nd); 3584 return error; 3585 } 3586 3587 int 3588 kern_ftruncate(int fd, off_t length) 3589 { 3590 struct thread *td = curthread; 3591 struct proc *p = td->td_proc; 3592 struct vattr vattr; 3593 struct vnode *vp; 3594 struct file *fp; 3595 int error; 3596 uid_t uid = 0; 3597 gid_t gid = 0; 3598 uint64_t old_size = 0; 3599 struct mount *mp; 3600 3601 if (length < 0) 3602 return(EINVAL); 3603 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3604 return (error); 3605 if (fp->f_nchandle.ncp) { 3606 error = ncp_writechk(&fp->f_nchandle); 3607 if (error) 3608 goto done; 3609 } 3610 if ((fp->f_flag & FWRITE) == 0) { 3611 error = EINVAL; 3612 goto done; 3613 } 3614 if (fp->f_flag & FAPPENDONLY) { /* inode was set s/uapnd */ 3615 error = EINVAL; 3616 goto done; 3617 } 3618 vp = (struct vnode *)fp->f_data; 3619 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3620 if (vp->v_type == VDIR) { 3621 error = EISDIR; 3622 goto done; 3623 } 3624 3625 if (vfs_quota_enabled) { 3626 error = VOP_GETATTR(vp, &vattr); 3627 KASSERT(error == 0, ("kern_ftruncate(): VOP_GETATTR didn't return 0")); 3628 uid = vattr.va_uid; 3629 gid = vattr.va_gid; 3630 old_size = vattr.va_size; 3631 } 3632 3633 if ((error = vn_writechk(vp, NULL)) == 0) { 3634 VATTR_NULL(&vattr); 3635 vattr.va_size = length; 3636 error = VOP_SETATTR(vp, &vattr, fp->f_cred); 3637 mp = vq_vptomp(vp); 3638 VFS_ACCOUNT(mp, uid, gid, length - old_size); 3639 } 3640 vn_unlock(vp); 3641 done: 3642 fdrop(fp); 3643 return (error); 3644 } 3645 3646 /* 3647 * ftruncate_args(int fd, int pad, off_t length) 3648 * 3649 * Truncate a file given a file descriptor. 3650 */ 3651 int 3652 sys_ftruncate(struct ftruncate_args *uap) 3653 { 3654 int error; 3655 3656 error = kern_ftruncate(uap->fd, uap->length); 3657 3658 return (error); 3659 } 3660 3661 /* 3662 * fsync(int fd) 3663 * 3664 * Sync an open file. 3665 */ 3666 int 3667 sys_fsync(struct fsync_args *uap) 3668 { 3669 struct thread *td = curthread; 3670 struct proc *p = td->td_proc; 3671 struct vnode *vp; 3672 struct file *fp; 3673 vm_object_t obj; 3674 int error; 3675 3676 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3677 return (error); 3678 vp = (struct vnode *)fp->f_data; 3679 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3680 if ((obj = vp->v_object) != NULL) { 3681 if (vp->v_mount == NULL || 3682 (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC) == 0) { 3683 vm_object_page_clean(obj, 0, 0, 0); 3684 } 3685 } 3686 error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL); 3687 if (error == 0 && vp->v_mount) 3688 error = buf_fsync(vp); 3689 vn_unlock(vp); 3690 fdrop(fp); 3691 3692 return (error); 3693 } 3694 3695 int 3696 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond) 3697 { 3698 struct nchandle fnchd; 3699 struct nchandle tnchd; 3700 struct namecache *ncp; 3701 struct vnode *fdvp; 3702 struct vnode *tdvp; 3703 struct mount *mp; 3704 int error; 3705 3706 bwillinode(1); 3707 fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC; 3708 if ((error = nlookup(fromnd)) != 0) 3709 return (error); 3710 if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL) 3711 return (ENOENT); 3712 fnchd.mount = fromnd->nl_nch.mount; 3713 cache_hold(&fnchd); 3714 3715 /* 3716 * unlock the source nch so we can lookup the target nch without 3717 * deadlocking. 
The target may or may not exist so we do not check 3718 * for a target vp like kern_mkdir() and other creation functions do. 3719 * 3720 * The source and target directories are ref'd and rechecked after 3721 * everything is relocked to determine if the source or target file 3722 * has been renamed. 3723 */ 3724 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED); 3725 fromnd->nl_flags &= ~NLC_NCPISLOCKED; 3726 cache_unlock(&fromnd->nl_nch); 3727 3728 tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP; 3729 if ((error = nlookup(tond)) != 0) { 3730 cache_drop(&fnchd); 3731 return (error); 3732 } 3733 if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) { 3734 cache_drop(&fnchd); 3735 return (ENOENT); 3736 } 3737 tnchd.mount = tond->nl_nch.mount; 3738 cache_hold(&tnchd); 3739 3740 /* 3741 * If the source and target are the same there is nothing to do 3742 */ 3743 if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) { 3744 cache_drop(&fnchd); 3745 cache_drop(&tnchd); 3746 return (0); 3747 } 3748 3749 /* 3750 * Mount points cannot be renamed or overwritten 3751 */ 3752 if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) & 3753 NCF_ISMOUNTPT 3754 ) { 3755 cache_drop(&fnchd); 3756 cache_drop(&tnchd); 3757 return (EINVAL); 3758 } 3759 3760 /* 3761 * Relock the source ncp. cache_relock() will deal with any 3762 * deadlocks against the already-locked tond and will also 3763 * make sure both are resolved. 3764 * 3765 * NOTE AFTER RELOCKING: The source or target ncp may have become 3766 * invalid while they were unlocked, nc_vp and nc_mount could 3767 * be NULL. 3768 */ 3769 cache_relock(&fromnd->nl_nch, fromnd->nl_cred, 3770 &tond->nl_nch, tond->nl_cred); 3771 fromnd->nl_flags |= NLC_NCPISLOCKED; 3772 3773 /* 3774 * If either fromnd or tond is marked destroyed, a ripout occurred 3775 * out from under us and we must retry. 3776 */ 3777 if ((fromnd->nl_nch.ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)) || 3778 fromnd->nl_nch.ncp->nc_vp == NULL || 3779 (tond->nl_nch.ncp->nc_flag & NCF_DESTROYED)) { 3780 kprintf("kern_rename: retry due to ripout on: " 3781 "\"%s\" -> \"%s\"\n", 3782 fromnd->nl_nch.ncp->nc_name, 3783 tond->nl_nch.ncp->nc_name); 3784 cache_drop(&fnchd); 3785 cache_drop(&tnchd); 3786 return (EAGAIN); 3787 } 3788 3789 /* 3790 * make sure the parent directories' linkages are the same 3791 */ 3792 if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent || 3793 tnchd.ncp != tond->nl_nch.ncp->nc_parent) { 3794 cache_drop(&fnchd); 3795 cache_drop(&tnchd); 3796 return (ENOENT); 3797 } 3798 3799 /* 3800 * Both the source and target must be within the same filesystem and 3801 * in the same filesystem as their parent directories within the 3802 * namecache topology. 3803 * 3804 * NOTE: fromnd's nc_mount or nc_vp could be NULL. 3805 */ 3806 mp = fnchd.mount; 3807 if (mp != tnchd.mount || mp != fromnd->nl_nch.mount || 3808 mp != tond->nl_nch.mount) { 3809 cache_drop(&fnchd); 3810 cache_drop(&tnchd); 3811 return (EXDEV); 3812 } 3813 3814 /* 3815 * Make sure the mount point is writable 3816 */ 3817 if ((error = ncp_writechk(&tond->nl_nch)) != 0) { 3818 cache_drop(&fnchd); 3819 cache_drop(&tnchd); 3820 return (error); 3821 } 3822 3823 /* 3824 * If the target exists and either the source or target is a directory, 3825 * then both must be directories. 3826 * 3827 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might 3828 * have become NULL. 
3829 */ 3830 if (tond->nl_nch.ncp->nc_vp) { 3831 if (fromnd->nl_nch.ncp->nc_vp == NULL) { 3832 error = ENOENT; 3833 } else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) { 3834 if (tond->nl_nch.ncp->nc_vp->v_type != VDIR) 3835 error = ENOTDIR; 3836 } else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) { 3837 error = EISDIR; 3838 } 3839 } 3840 3841 /* 3842 * You cannot rename a source into itself or a subdirectory of itself. 3843 * We check this by traversing the target directory upwards looking 3844 * for a match against the source. 3845 * 3846 * XXX MPSAFE 3847 */ 3848 if (error == 0) { 3849 for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) { 3850 if (fromnd->nl_nch.ncp == ncp) { 3851 error = EINVAL; 3852 break; 3853 } 3854 } 3855 } 3856 3857 cache_drop(&fnchd); 3858 cache_drop(&tnchd); 3859 3860 /* 3861 * Even though the namespaces are different, they may still represent 3862 * hardlinks to the same file. The filesystem might have a hard time 3863 * with this so we issue a NREMOVE of the source instead of a NRENAME 3864 * when we detect the situation. 3865 */ 3866 if (error == 0) { 3867 fdvp = fromnd->nl_dvp; 3868 tdvp = tond->nl_dvp; 3869 if (fdvp == NULL || tdvp == NULL) { 3870 error = EPERM; 3871 } else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) { 3872 error = VOP_NREMOVE(&fromnd->nl_nch, fdvp, 3873 fromnd->nl_cred); 3874 } else { 3875 error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch, 3876 fdvp, tdvp, tond->nl_cred); 3877 } 3878 } 3879 return (error); 3880 } 3881 3882 /* 3883 * rename_args(char *from, char *to) 3884 * 3885 * Rename files. Source and destination must either both be directories, 3886 * or both not be directories. If target is a directory, it must be empty. 3887 */ 3888 int 3889 sys_rename(struct rename_args *uap) 3890 { 3891 struct nlookupdata fromnd, tond; 3892 int error; 3893 3894 do { 3895 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0); 3896 if (error == 0) { 3897 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0); 3898 if (error == 0) 3899 error = kern_rename(&fromnd, &tond); 3900 nlookup_done(&tond); 3901 } 3902 nlookup_done(&fromnd); 3903 } while (error == EAGAIN); 3904 return (error); 3905 } 3906 3907 /* 3908 * renameat_args(int oldfd, char *old, int newfd, char *new) 3909 * 3910 * Rename files using paths relative to the directories associated with 3911 * oldfd and newfd. Source and destination must either both be directories, 3912 * or both not be directories. If target is a directory, it must be empty. 
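 *
 * As with sys_rename() above, the lookup/rename sequence is retried
 * while kern_rename() returns EAGAIN, which it does when a namecache
 * ripout is detected after relocking.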
3913 */ 3914 int 3915 sys_renameat(struct renameat_args *uap) 3916 { 3917 struct nlookupdata oldnd, newnd; 3918 struct file *oldfp, *newfp; 3919 int error; 3920 3921 do { 3922 error = nlookup_init_at(&oldnd, &oldfp, 3923 uap->oldfd, uap->old, 3924 UIO_USERSPACE, 0); 3925 if (error == 0) { 3926 error = nlookup_init_at(&newnd, &newfp, 3927 uap->newfd, uap->new, 3928 UIO_USERSPACE, 0); 3929 if (error == 0) 3930 error = kern_rename(&oldnd, &newnd); 3931 nlookup_done_at(&newnd, newfp); 3932 } 3933 nlookup_done_at(&oldnd, oldfp); 3934 } while (error == EAGAIN); 3935 return (error); 3936 } 3937 3938 int 3939 kern_mkdir(struct nlookupdata *nd, int mode) 3940 { 3941 struct thread *td = curthread; 3942 struct proc *p = td->td_proc; 3943 struct vnode *vp; 3944 struct vattr vattr; 3945 int error; 3946 3947 bwillinode(1); 3948 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP; 3949 if ((error = nlookup(nd)) != 0) 3950 return (error); 3951 3952 if (nd->nl_nch.ncp->nc_vp) 3953 return (EEXIST); 3954 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3955 return (error); 3956 VATTR_NULL(&vattr); 3957 vattr.va_type = VDIR; 3958 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; 3959 3960 vp = NULL; 3961 error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, td->td_ucred, &vattr); 3962 if (error == 0) 3963 vput(vp); 3964 return (error); 3965 } 3966 3967 /* 3968 * mkdir_args(char *path, int mode) 3969 * 3970 * Make a directory file. 3971 */ 3972 int 3973 sys_mkdir(struct mkdir_args *uap) 3974 { 3975 struct nlookupdata nd; 3976 int error; 3977 3978 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3979 if (error == 0) 3980 error = kern_mkdir(&nd, uap->mode); 3981 nlookup_done(&nd); 3982 return (error); 3983 } 3984 3985 /* 3986 * mkdirat_args(int fd, char *path, mode_t mode) 3987 * 3988 * Make a directory file. The path is relative to the directory associated 3989 * with fd. 3990 */ 3991 int 3992 sys_mkdirat(struct mkdirat_args *uap) 3993 { 3994 struct nlookupdata nd; 3995 struct file *fp; 3996 int error; 3997 3998 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 3999 if (error == 0) 4000 error = kern_mkdir(&nd, uap->mode); 4001 nlookup_done_at(&nd, fp); 4002 return (error); 4003 } 4004 4005 int 4006 kern_rmdir(struct nlookupdata *nd) 4007 { 4008 int error; 4009 4010 bwillinode(1); 4011 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 4012 if ((error = nlookup(nd)) != 0) 4013 return (error); 4014 4015 /* 4016 * Do not allow directories representing mount points to be 4017 * deleted, even if empty. Check write perms on mount point 4018 * in case the vnode is aliased (aka nullfs). 4019 */ 4020 if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT)) 4021 return (EBUSY); 4022 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 4023 return (error); 4024 error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 4025 return (error); 4026 } 4027 4028 /* 4029 * rmdir_args(char *path) 4030 * 4031 * Remove a directory file. 
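 *
 * kern_rmdir() above refuses to remove directories that are mount
 * points (EBUSY) and verifies that the mount is writable before
 * issuing VOP_NRMDIR().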
4032 */ 4033 int 4034 sys_rmdir(struct rmdir_args *uap) 4035 { 4036 struct nlookupdata nd; 4037 int error; 4038 4039 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 4040 if (error == 0) 4041 error = kern_rmdir(&nd); 4042 nlookup_done(&nd); 4043 return (error); 4044 } 4045 4046 int 4047 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res, 4048 enum uio_seg direction) 4049 { 4050 struct thread *td = curthread; 4051 struct proc *p = td->td_proc; 4052 struct vnode *vp; 4053 struct file *fp; 4054 struct uio auio; 4055 struct iovec aiov; 4056 off_t loff; 4057 int error, eofflag; 4058 4059 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 4060 return (error); 4061 if ((fp->f_flag & FREAD) == 0) { 4062 error = EBADF; 4063 goto done; 4064 } 4065 vp = (struct vnode *)fp->f_data; 4066 unionread: 4067 if (vp->v_type != VDIR) { 4068 error = EINVAL; 4069 goto done; 4070 } 4071 aiov.iov_base = buf; 4072 aiov.iov_len = count; 4073 auio.uio_iov = &aiov; 4074 auio.uio_iovcnt = 1; 4075 auio.uio_rw = UIO_READ; 4076 auio.uio_segflg = direction; 4077 auio.uio_td = td; 4078 auio.uio_resid = count; 4079 loff = auio.uio_offset = fp->f_offset; 4080 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL); 4081 fp->f_offset = auio.uio_offset; 4082 if (error) 4083 goto done; 4084 if (count == auio.uio_resid) { 4085 if (union_dircheckp) { 4086 error = union_dircheckp(td, &vp, fp); 4087 if (error == -1) 4088 goto unionread; 4089 if (error) 4090 goto done; 4091 } 4092 #if 0 4093 if ((vp->v_flag & VROOT) && 4094 (vp->v_mount->mnt_flag & MNT_UNION)) { 4095 struct vnode *tvp = vp; 4096 vp = vp->v_mount->mnt_vnodecovered; 4097 vref(vp); 4098 fp->f_data = vp; 4099 fp->f_offset = 0; 4100 vrele(tvp); 4101 goto unionread; 4102 } 4103 #endif 4104 } 4105 4106 /* 4107 * WARNING! *basep may not be wide enough to accomodate the 4108 * seek offset. XXX should we hack this to return the upper 32 bits 4109 * for offsets greater then 4G? 4110 */ 4111 if (basep) { 4112 *basep = (long)loff; 4113 } 4114 *res = count - auio.uio_resid; 4115 done: 4116 fdrop(fp); 4117 return (error); 4118 } 4119 4120 /* 4121 * getdirentries_args(int fd, char *buf, u_int conut, long *basep) 4122 * 4123 * Read a block of directory entries in a file system independent format. 4124 */ 4125 int 4126 sys_getdirentries(struct getdirentries_args *uap) 4127 { 4128 long base; 4129 int error; 4130 4131 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base, 4132 &uap->sysmsg_result, UIO_USERSPACE); 4133 4134 if (error == 0 && uap->basep) 4135 error = copyout(&base, uap->basep, sizeof(*uap->basep)); 4136 return (error); 4137 } 4138 4139 /* 4140 * getdents_args(int fd, char *buf, size_t count) 4141 */ 4142 int 4143 sys_getdents(struct getdents_args *uap) 4144 { 4145 int error; 4146 4147 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL, 4148 &uap->sysmsg_result, UIO_USERSPACE); 4149 4150 return (error); 4151 } 4152 4153 /* 4154 * Set the mode mask for creation of filesystem nodes. 4155 * 4156 * umask(int newmask) 4157 */ 4158 int 4159 sys_umask(struct umask_args *uap) 4160 { 4161 struct thread *td = curthread; 4162 struct proc *p = td->td_proc; 4163 struct filedesc *fdp; 4164 4165 fdp = p->p_fd; 4166 uap->sysmsg_result = fdp->fd_cmask; 4167 fdp->fd_cmask = uap->newmask & ALLPERMS; 4168 return (0); 4169 } 4170 4171 /* 4172 * revoke(char *path) 4173 * 4174 * Void all references to file by ripping underlying filesystem 4175 * away from vnode. 
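 *
 * Only the file's owner or a user holding PRIV_VFS_REVOKE may revoke
 * it; for character and block devices the call does nothing unless the
 * vnode still has active references (see the vcount() check below).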
4176 */ 4177 int 4178 sys_revoke(struct revoke_args *uap) 4179 { 4180 struct nlookupdata nd; 4181 struct vattr vattr; 4182 struct vnode *vp; 4183 struct ucred *cred; 4184 int error; 4185 4186 vp = NULL; 4187 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4188 if (error == 0) 4189 error = nlookup(&nd); 4190 if (error == 0) 4191 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 4192 cred = crhold(nd.nl_cred); 4193 nlookup_done(&nd); 4194 if (error == 0) { 4195 if (error == 0) 4196 error = VOP_GETATTR(vp, &vattr); 4197 if (error == 0 && cred->cr_uid != vattr.va_uid) 4198 error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0); 4199 if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) { 4200 if (vcount(vp) > 0) 4201 error = vrevoke(vp, cred); 4202 } else if (error == 0) { 4203 error = vrevoke(vp, cred); 4204 } 4205 vrele(vp); 4206 } 4207 if (cred) 4208 crfree(cred); 4209 return (error); 4210 } 4211 4212 /* 4213 * getfh_args(char *fname, fhandle_t *fhp) 4214 * 4215 * Get (NFS) file handle 4216 * 4217 * NOTE: We use the fsid of the covering mount, even if it is a nullfs 4218 * mount. This allows nullfs mounts to be explicitly exported. 4219 * 4220 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe. 4221 * 4222 * nullfs mounts of subdirectories are not safe. That is, it will 4223 * work, but you do not really have protection against access to 4224 * the related parent directories. 4225 */ 4226 int 4227 sys_getfh(struct getfh_args *uap) 4228 { 4229 struct thread *td = curthread; 4230 struct nlookupdata nd; 4231 fhandle_t fh; 4232 struct vnode *vp; 4233 struct mount *mp; 4234 int error; 4235 4236 /* 4237 * Must be super user 4238 */ 4239 if ((error = priv_check(td, PRIV_ROOT)) != 0) 4240 return (error); 4241 4242 vp = NULL; 4243 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW); 4244 if (error == 0) 4245 error = nlookup(&nd); 4246 if (error == 0) 4247 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4248 mp = nd.nl_nch.mount; 4249 nlookup_done(&nd); 4250 if (error == 0) { 4251 bzero(&fh, sizeof(fh)); 4252 fh.fh_fsid = mp->mnt_stat.f_fsid; 4253 error = VFS_VPTOFH(vp, &fh.fh_fid); 4254 vput(vp); 4255 if (error == 0) 4256 error = copyout(&fh, uap->fhp, sizeof(fh)); 4257 } 4258 return (error); 4259 } 4260 4261 /* 4262 * fhopen_args(const struct fhandle *u_fhp, int flags) 4263 * 4264 * syscall for the rpc.lockd to use to translate a NFS file handle into 4265 * an open descriptor. 4266 * 4267 * warning: do not remove the priv_check() call or this becomes one giant 4268 * security hole. 4269 */ 4270 int 4271 sys_fhopen(struct fhopen_args *uap) 4272 { 4273 struct thread *td = curthread; 4274 struct filedesc *fdp = td->td_proc->p_fd; 4275 struct mount *mp; 4276 struct vnode *vp; 4277 struct fhandle fhp; 4278 struct vattr vat; 4279 struct vattr *vap = &vat; 4280 struct flock lf; 4281 int fmode, mode, error = 0, type; 4282 struct file *nfp; 4283 struct file *fp; 4284 int indx; 4285 4286 /* 4287 * Must be super user 4288 */ 4289 error = priv_check(td, PRIV_ROOT); 4290 if (error) 4291 return (error); 4292 4293 fmode = FFLAGS(uap->flags); 4294 4295 /* 4296 * Why not allow a non-read/write open for our lockd? 
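 *
 * In any case the code below insists on at least one of FREAD/FWRITE
 * and explicitly rejects O_CREAT, since a file handle necessarily
 * refers to an existing file.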
4297 */ 4298 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) 4299 return (EINVAL); 4300 error = copyin(uap->u_fhp, &fhp, sizeof(fhp)); 4301 if (error) 4302 return(error); 4303 4304 /* 4305 * Find the mount point 4306 */ 4307 mp = vfs_getvfs(&fhp.fh_fsid); 4308 if (mp == NULL) { 4309 error = ESTALE; 4310 goto done; 4311 } 4312 /* now give me my vnode, it gets returned to me locked */ 4313 error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp); 4314 if (error) 4315 goto done; 4316 /* 4317 * from now on we have to make sure not 4318 * to forget about the vnode 4319 * any error that causes an abort must vput(vp) 4320 * just set error = err and 'goto bad;'. 4321 */ 4322 4323 /* 4324 * from vn_open 4325 */ 4326 if (vp->v_type == VLNK) { 4327 error = EMLINK; 4328 goto bad; 4329 } 4330 if (vp->v_type == VSOCK) { 4331 error = EOPNOTSUPP; 4332 goto bad; 4333 } 4334 mode = 0; 4335 if (fmode & (FWRITE | O_TRUNC)) { 4336 if (vp->v_type == VDIR) { 4337 error = EISDIR; 4338 goto bad; 4339 } 4340 error = vn_writechk(vp, NULL); 4341 if (error) 4342 goto bad; 4343 mode |= VWRITE; 4344 } 4345 if (fmode & FREAD) 4346 mode |= VREAD; 4347 if (mode) { 4348 error = VOP_ACCESS(vp, mode, td->td_ucred); 4349 if (error) 4350 goto bad; 4351 } 4352 if (fmode & O_TRUNC) { 4353 vn_unlock(vp); /* XXX */ 4354 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 4355 VATTR_NULL(vap); 4356 vap->va_size = 0; 4357 error = VOP_SETATTR(vp, vap, td->td_ucred); 4358 if (error) 4359 goto bad; 4360 } 4361 4362 /* 4363 * VOP_OPEN needs the file pointer so it can potentially override 4364 * it. 4365 * 4366 * WARNING! no f_nchandle will be associated when fhopen()ing a 4367 * directory. XXX 4368 */ 4369 if ((error = falloc(td->td_lwp, &nfp, &indx)) != 0) 4370 goto bad; 4371 fp = nfp; 4372 4373 error = VOP_OPEN(vp, fmode, td->td_ucred, fp); 4374 if (error) { 4375 /* 4376 * setting f_ops this way prevents VOP_CLOSE from being 4377 * called or fdrop() releasing the vp from v_data. Since 4378 * the VOP_OPEN failed we don't want to VOP_CLOSE. 4379 */ 4380 fp->f_ops = &badfileops; 4381 fp->f_data = NULL; 4382 goto bad_drop; 4383 } 4384 4385 /* 4386 * The fp is given its own reference, we still have our ref and lock. 4387 * 4388 * Assert that all regular files must be created with a VM object. 4389 */ 4390 if (vp->v_type == VREG && vp->v_object == NULL) { 4391 kprintf("fhopen: regular file did not have VM object: %p\n", vp); 4392 goto bad_drop; 4393 } 4394 4395 /* 4396 * The open was successful. Handle any locking requirements. 4397 */ 4398 if (fmode & (O_EXLOCK | O_SHLOCK)) { 4399 lf.l_whence = SEEK_SET; 4400 lf.l_start = 0; 4401 lf.l_len = 0; 4402 if (fmode & O_EXLOCK) 4403 lf.l_type = F_WRLCK; 4404 else 4405 lf.l_type = F_RDLCK; 4406 if (fmode & FNONBLOCK) 4407 type = 0; 4408 else 4409 type = F_WAIT; 4410 vn_unlock(vp); 4411 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 4412 /* 4413 * release our private reference. 4414 */ 4415 fsetfd(fdp, NULL, indx); 4416 fdrop(fp); 4417 vrele(vp); 4418 goto done; 4419 } 4420 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4421 fp->f_flag |= FHASLOCK; 4422 } 4423 4424 /* 4425 * Clean up. Associate the file pointer with the previously 4426 * reserved descriptor and return it. 
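 *
 * This mirrors the tail of kern_open(): fsetfd() publishes the file in
 * the descriptor table, fdrop() releases our private reference, and
 * the descriptor index is returned in sysmsg_result.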
4427 */ 4428 vput(vp); 4429 fsetfd(fdp, fp, indx); 4430 fdrop(fp); 4431 uap->sysmsg_result = indx; 4432 if (uap->flags & O_CLOEXEC) 4433 error = fsetfdflags(fdp, indx, UF_EXCLOSE); 4434 return (error); 4435 4436 bad_drop: 4437 fsetfd(fdp, NULL, indx); 4438 fdrop(fp); 4439 bad: 4440 vput(vp); 4441 done: 4442 return (error); 4443 } 4444 4445 /* 4446 * fhstat_args(struct fhandle *u_fhp, struct stat *sb) 4447 */ 4448 int 4449 sys_fhstat(struct fhstat_args *uap) 4450 { 4451 struct thread *td = curthread; 4452 struct stat sb; 4453 fhandle_t fh; 4454 struct mount *mp; 4455 struct vnode *vp; 4456 int error; 4457 4458 /* 4459 * Must be super user 4460 */ 4461 error = priv_check(td, PRIV_ROOT); 4462 if (error) 4463 return (error); 4464 4465 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); 4466 if (error) 4467 return (error); 4468 4469 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) 4470 error = ESTALE; 4471 if (error == 0) { 4472 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) == 0) { 4473 error = vn_stat(vp, &sb, td->td_ucred); 4474 vput(vp); 4475 } 4476 } 4477 if (error == 0) 4478 error = copyout(&sb, uap->sb, sizeof(sb)); 4479 return (error); 4480 } 4481 4482 /* 4483 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf) 4484 */ 4485 int 4486 sys_fhstatfs(struct fhstatfs_args *uap) 4487 { 4488 struct thread *td = curthread; 4489 struct proc *p = td->td_proc; 4490 struct statfs *sp; 4491 struct mount *mp; 4492 struct vnode *vp; 4493 struct statfs sb; 4494 char *fullpath, *freepath; 4495 fhandle_t fh; 4496 int error; 4497 4498 /* 4499 * Must be super user 4500 */ 4501 if ((error = priv_check(td, PRIV_ROOT))) 4502 return (error); 4503 4504 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4505 return (error); 4506 4507 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4508 error = ESTALE; 4509 goto done; 4510 } 4511 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4512 error = ESTALE; 4513 goto done; 4514 } 4515 4516 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) != 0) 4517 goto done; 4518 mp = vp->v_mount; 4519 sp = &mp->mnt_stat; 4520 vput(vp); 4521 if ((error = VFS_STATFS(mp, sp, td->td_ucred)) != 0) 4522 goto done; 4523 4524 error = mount_path(p, mp, &fullpath, &freepath); 4525 if (error) 4526 goto done; 4527 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 4528 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 4529 kfree(freepath, M_TEMP); 4530 4531 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 4532 if (priv_check(td, PRIV_ROOT)) { 4533 bcopy(sp, &sb, sizeof(sb)); 4534 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0; 4535 sp = &sb; 4536 } 4537 error = copyout(sp, uap->buf, sizeof(*sp)); 4538 done: 4539 return (error); 4540 } 4541 4542 /* 4543 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf) 4544 */ 4545 int 4546 sys_fhstatvfs(struct fhstatvfs_args *uap) 4547 { 4548 struct thread *td = curthread; 4549 struct proc *p = td->td_proc; 4550 struct statvfs *sp; 4551 struct mount *mp; 4552 struct vnode *vp; 4553 fhandle_t fh; 4554 int error; 4555 4556 /* 4557 * Must be super user 4558 */ 4559 if ((error = priv_check(td, PRIV_ROOT))) 4560 return (error); 4561 4562 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4563 return (error); 4564 4565 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4566 error = ESTALE; 4567 goto done; 4568 } 4569 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4570 error = ESTALE; 4571 goto done; 4572 } 4573 4574 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp))) 4575 goto done; 4576 mp = vp->v_mount; 4577 sp = &mp->mnt_vstat; 4578 
/*
 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf)
 */
int
sys_fhstatvfs(struct fhstatvfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct statvfs *sp;
	struct mount *mp;
	struct vnode *vp;
	fhandle_t fh;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)))
		return (error);

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) {
		error = ESTALE;
		goto done;
	}
	if (p != NULL && !chroot_visible_mnt(mp, p)) {
		error = ESTALE;
		goto done;
	}

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))
		goto done;
	mp = vp->v_mount;
	sp = &mp->mnt_vstat;
	vput(vp);
	if ((error = VFS_STATVFS(mp, sp, td->td_ucred)) != 0)
		goto done;

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;
	error = copyout(sp, uap->buf, sizeof(*sp));
done:
	return (error);
}

/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), an attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed to be copied into kernel space by the
 * syscall in the future, once the remaining issues with the various
 * consumers of the attribute code have been resolved.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;
	struct vnode *vp;
	char attrname[EXTATTR_MAXNAMELEN];
	int error;
	size_t size;

	attrname[0] = 0;
	vp = NULL;
	error = 0;

	if (error == 0 && uap->filename) {
		error = nlookup_init(&nd, uap->filename, UIO_USERSPACE,
				     NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
		nlookup_done(&nd);
	}

	if (error == 0 && uap->attrname) {
		error = copyinstr(uap->attrname, attrname, EXTATTR_MAXNAMELEN,
				  &size);
	}

	if (error == 0) {
		error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = ncp_writechk(&nd.nl_nch);
		if (error == 0) {
			error = VFS_EXTATTRCTL(nd.nl_nch.mount, uap->cmd, vp,
					       uap->attrnamespace,
					       uap->attrname, nd.nl_cred);
		}
		nlookup_done(&nd);
	}

	return (error);
}

/*
 * Syscall to set a named extended attribute on a file or directory.
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	error = VOP_SETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);

	vput(vp);
	nlookup_done(&nd);
	return (error);
}

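/*
 * Illustrative userland sketch (excluded from the build, assumptions
 * noted): a typical round trip through the set/get syscalls using the
 * libc wrappers extattr_set_file(2) and extattr_get_file(2).  The path
 * and attribute name are hypothetical, and whether the "user" namespace
 * is writable depends on the filesystem and its configuration.
 */
#if 0
#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	const char *path = "/tmp/example.dat";
	const char value[] = "blue";
	char buf[64];
	ssize_t n;

	/* Store the attribute; the kernel side builds a UIO_WRITE uio. */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "color",
	    value, sizeof(value)) < 0)
		err(1, "extattr_set_file");

	/* Read it back; the result is the number of bytes copied out. */
	n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "color",
	    buf, sizeof(buf));
	if (n < 0)
		err(1, "extattr_get_file");
	printf("got %zd bytes: %.*s\n", n, (int)n, buf);
	return (0);
}
#endif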
/*
 * Syscall to get a named extended attribute on a file or directory.
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct uio auio;
	struct iovec aiov;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	error = VOP_GETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);
	uap->sysmsg_result = uap->nbytes - auio.uio_resid;

	vput(vp);
	nlookup_done(&nd);
	return (error);
}

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts the attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0) {
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
		if (error == 0) {
			error = VOP_SETEXTATTR(vp, uap->attrnamespace,
					       attrname, NULL, nd.nl_cred);
			vput(vp);
		}
	}
	nlookup_done(&nd);
	return (error);
}

/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;
	while (nch.ncp) {
		if (nch.mount == p->p_fd->fd_nrdir.mount &&
		    nch.ncp == p->p_fd->fd_nrdir.ncp) {
			return (1);
		}
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		} else {
			nch.ncp = nch.ncp->nc_parent;
		}
	}

	/*
	 * The mount point is not visible to the process, but if the
	 * process root itself sits on this mount, treat it as visible
	 * and return TRUE anyway.
	 */
	if (p->p_fd->fd_nrdir.mount == mp)
		return (1);

	return (0);
}

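/*
 * Illustrative userland sketch (excluded from the build, assumptions
 * noted): the visibility rule implemented by chroot_visible_mnt() is
 * what makes fhstatfs(2) fail with ESTALE when the handle refers to a
 * mount outside the caller's chroot.  The paths are hypothetical and
 * the program must run as root (for getfh(2), chroot(2), fhstatfs(2)).
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	fhandle_t fh;
	struct statfs sfs;

	/* Handle for a file on a mount outside the upcoming chroot. */
	if (getfh("/mnt/otherfs/somefile", &fh) < 0)
		err(1, "getfh");

	/* Confine the process to a directory on a different mount. */
	if (chroot("/var/empty") < 0 || chdir("/") < 0)
		err(1, "chroot");

	/* The mount is no longer visible, so the kernel answers ESTALE. */
	if (fhstatfs(&fh, &sfs) < 0 && errno == ESTALE)
		printf("mount hidden by chroot, as expected\n");
	return (0);
}
#endif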