1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $ 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/buf.h> 45 #include <sys/conf.h> 46 #include <sys/sysent.h> 47 #include <sys/malloc.h> 48 #include <sys/mount.h> 49 #include <sys/mountctl.h> 50 #include <sys/sysproto.h> 51 #include <sys/filedesc.h> 52 #include <sys/kernel.h> 53 #include <sys/fcntl.h> 54 #include <sys/file.h> 55 #include <sys/linker.h> 56 #include <sys/stat.h> 57 #include <sys/unistd.h> 58 #include <sys/vnode.h> 59 #include <sys/proc.h> 60 #include <sys/priv.h> 61 #include <sys/jail.h> 62 #include <sys/namei.h> 63 #include <sys/nlookup.h> 64 #include <sys/dirent.h> 65 #include <sys/extattr.h> 66 #include <sys/spinlock.h> 67 #include <sys/kern_syscall.h> 68 #include <sys/objcache.h> 69 #include <sys/sysctl.h> 70 71 #include <sys/buf2.h> 72 #include <sys/file2.h> 73 #include <sys/spinlock2.h> 74 #include <sys/mplock2.h> 75 76 #include <vm/vm.h> 77 #include <vm/vm_object.h> 78 #include <vm/vm_page.h> 79 80 #include <machine/limits.h> 81 #include <machine/stdarg.h> 82 83 #include <vfs/union/union.h> 84 85 static void mount_warning(struct mount *mp, const char *ctl, ...) 
86 __printflike(2, 3); 87 static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb); 88 static int checkvp_chdir (struct vnode *vn, struct thread *td); 89 static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch); 90 static int chroot_refuse_vdir_fds (struct filedesc *fdp); 91 static int chroot_visible_mnt(struct mount *mp, struct proc *p); 92 static int getutimes (const struct timeval *, struct timespec *); 93 static int setfown (struct mount *, struct vnode *, uid_t, gid_t); 94 static int setfmode (struct vnode *, int); 95 static int setfflags (struct vnode *, int); 96 static int setutimes (struct vnode *, struct vattr *, 97 const struct timespec *, int); 98 static int usermount = 0; /* if 1, non-root can mount fs. */ 99 100 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *); 101 102 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, 103 "Allow non-root users to mount filesystems"); 104 105 /* 106 * Virtual File System System Calls 107 */ 108 109 /* 110 * Mount a file system. 111 * 112 * mount_args(char *type, char *path, int flags, caddr_t data) 113 * 114 * MPALMOSTSAFE 115 */ 116 int 117 sys_mount(struct mount_args *uap) 118 { 119 struct thread *td = curthread; 120 struct vnode *vp; 121 struct nchandle nch; 122 struct mount *mp, *nullmp; 123 struct vfsconf *vfsp; 124 int error, flag = 0, flag2 = 0; 125 int hasmount; 126 struct vattr va; 127 struct nlookupdata nd; 128 char fstypename[MFSNAMELEN]; 129 struct ucred *cred; 130 131 cred = td->td_ucred; 132 if (jailed(cred)) { 133 error = EPERM; 134 goto done; 135 } 136 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 137 goto done; 138 139 /* 140 * Do not allow NFS export by non-root users. 141 */ 142 if (uap->flags & MNT_EXPORTED) { 143 error = priv_check(td, PRIV_ROOT); 144 if (error) 145 goto done; 146 } 147 /* 148 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users 149 */ 150 if (priv_check(td, PRIV_ROOT)) 151 uap->flags |= MNT_NOSUID | MNT_NODEV; 152 153 /* 154 * Lookup the requested path and extract the nch and vnode. 155 */ 156 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 157 if (error == 0) { 158 if ((error = nlookup(&nd)) == 0) { 159 if (nd.nl_nch.ncp->nc_vp == NULL) 160 error = ENOENT; 161 } 162 } 163 if (error) { 164 nlookup_done(&nd); 165 goto done; 166 } 167 168 /* 169 * If the target filesystem is resolved via a nullfs mount, then 170 * nd.nl_nch.mount will be pointing to the nullfs mount structure 171 * instead of the target file system. We need it in case we are 172 * doing an update. 173 */ 174 nullmp = nd.nl_nch.mount; 175 176 /* 177 * Extract the locked+refd ncp and cleanup the nd structure 178 */ 179 nch = nd.nl_nch; 180 cache_zero(&nd.nl_nch); 181 nlookup_done(&nd); 182 183 if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && 184 (mp = cache_findmount(&nch)) != NULL) { 185 cache_dropmount(mp); 186 hasmount = 1; 187 } else { 188 hasmount = 0; 189 } 190 191 192 /* 193 * now we have the locked ref'd nch and unreferenced vnode. 194 */ 195 vp = nch.ncp->nc_vp; 196 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) { 197 cache_put(&nch); 198 goto done; 199 } 200 cache_unlock(&nch); 201 202 /* 203 * Extract the file system type. We need to know this early, to take 204 * appropriate actions if we are dealing with a nullfs. 
205 */ 206 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) { 207 cache_drop(&nch); 208 vput(vp); 209 goto done; 210 } 211 212 /* 213 * Now we have an unlocked ref'd nch and a locked ref'd vp 214 */ 215 if (uap->flags & MNT_UPDATE) { 216 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 217 cache_drop(&nch); 218 vput(vp); 219 error = EINVAL; 220 goto done; 221 } 222 223 if (strncmp(fstypename, "null", 5) == 0) { 224 KKASSERT(nullmp); 225 mp = nullmp; 226 } else { 227 mp = vp->v_mount; 228 } 229 230 flag = mp->mnt_flag; 231 flag2 = mp->mnt_kern_flag; 232 /* 233 * We only allow the filesystem to be reloaded if it 234 * is currently mounted read-only. 235 */ 236 if ((uap->flags & MNT_RELOAD) && 237 ((mp->mnt_flag & MNT_RDONLY) == 0)) { 238 cache_drop(&nch); 239 vput(vp); 240 error = EOPNOTSUPP; /* Needs translation */ 241 goto done; 242 } 243 /* 244 * Only root, or the user that did the original mount is 245 * permitted to update it. 246 */ 247 if (mp->mnt_stat.f_owner != cred->cr_uid && 248 (error = priv_check(td, PRIV_ROOT))) { 249 cache_drop(&nch); 250 vput(vp); 251 goto done; 252 } 253 if (vfs_busy(mp, LK_NOWAIT)) { 254 cache_drop(&nch); 255 vput(vp); 256 error = EBUSY; 257 goto done; 258 } 259 if (hasmount) { 260 cache_drop(&nch); 261 vfs_unbusy(mp); 262 vput(vp); 263 error = EBUSY; 264 goto done; 265 } 266 mp->mnt_flag |= 267 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE); 268 lwkt_gettoken(&mp->mnt_token); 269 vn_unlock(vp); 270 goto update; 271 } 272 273 /* 274 * If the user is not root, ensure that they own the directory 275 * onto which we are attempting to mount. 276 */ 277 if ((error = VOP_GETATTR(vp, &va)) || 278 (va.va_uid != cred->cr_uid && 279 (error = priv_check(td, PRIV_ROOT)))) { 280 cache_drop(&nch); 281 vput(vp); 282 goto done; 283 } 284 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) { 285 cache_drop(&nch); 286 vput(vp); 287 goto done; 288 } 289 if (vp->v_type != VDIR) { 290 cache_drop(&nch); 291 vput(vp); 292 error = ENOTDIR; 293 goto done; 294 } 295 if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) { 296 cache_drop(&nch); 297 vput(vp); 298 error = EPERM; 299 goto done; 300 } 301 vfsp = vfsconf_find_by_name(fstypename); 302 if (vfsp == NULL) { 303 linker_file_t lf; 304 305 /* Only load modules for root (very important!) */ 306 if ((error = priv_check(td, PRIV_ROOT)) != 0) { 307 cache_drop(&nch); 308 vput(vp); 309 goto done; 310 } 311 error = linker_load_file(fstypename, &lf); 312 if (error || lf == NULL) { 313 cache_drop(&nch); 314 vput(vp); 315 if (lf == NULL) 316 error = ENODEV; 317 goto done; 318 } 319 lf->userrefs++; 320 /* lookup again, see if the VFS was loaded */ 321 vfsp = vfsconf_find_by_name(fstypename); 322 if (vfsp == NULL) { 323 lf->userrefs--; 324 linker_file_unload(lf); 325 cache_drop(&nch); 326 vput(vp); 327 error = ENODEV; 328 goto done; 329 } 330 } 331 if (hasmount) { 332 cache_drop(&nch); 333 vput(vp); 334 error = EBUSY; 335 goto done; 336 } 337 338 /* 339 * Allocate and initialize the filesystem. 
340 */ 341 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK); 342 mount_init(mp); 343 vfs_busy(mp, LK_NOWAIT); 344 mp->mnt_op = vfsp->vfc_vfsops; 345 mp->mnt_vfc = vfsp; 346 vfsp->vfc_refcount++; 347 mp->mnt_stat.f_type = vfsp->vfc_typenum; 348 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 349 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 350 mp->mnt_stat.f_owner = cred->cr_uid; 351 lwkt_gettoken(&mp->mnt_token); 352 vn_unlock(vp); 353 update: 354 /* 355 * (per-mount token acquired at this point) 356 * 357 * Set the mount level flags. 358 */ 359 if (uap->flags & MNT_RDONLY) 360 mp->mnt_flag |= MNT_RDONLY; 361 else if (mp->mnt_flag & MNT_RDONLY) 362 mp->mnt_kern_flag |= MNTK_WANTRDWR; 363 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | 364 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME | 365 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 366 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 367 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | 368 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE | 369 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 370 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 371 /* 372 * Mount the filesystem. 373 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they 374 * get. 375 */ 376 error = VFS_MOUNT(mp, uap->path, uap->data, cred); 377 if (mp->mnt_flag & MNT_UPDATE) { 378 if (mp->mnt_kern_flag & MNTK_WANTRDWR) 379 mp->mnt_flag &= ~MNT_RDONLY; 380 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); 381 mp->mnt_kern_flag &=~ MNTK_WANTRDWR; 382 if (error) { 383 mp->mnt_flag = flag; 384 mp->mnt_kern_flag = flag2; 385 } 386 lwkt_reltoken(&mp->mnt_token); 387 vfs_unbusy(mp); 388 vrele(vp); 389 cache_drop(&nch); 390 goto done; 391 } 392 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 393 394 /* 395 * Put the new filesystem on the mount list after root. The mount 396 * point gets its own mnt_ncmountpt (unless the VFS already set one 397 * up) which represents the root of the mount. The lookup code 398 * detects the mount point going forward and checks the root of 399 * the mount going backwards. 400 * 401 * It is not necessary to invalidate or purge the vnode underneath 402 * because elements under the mount will be given their own glue 403 * namecache record. 404 */ 405 if (!error) { 406 if (mp->mnt_ncmountpt.ncp == NULL) { 407 /* 408 * allocate, then unlock, but leave the ref intact 409 */ 410 cache_allocroot(&mp->mnt_ncmountpt, mp, NULL); 411 cache_unlock(&mp->mnt_ncmountpt); 412 } 413 mp->mnt_ncmounton = nch; /* inherits ref */ 414 nch.ncp->nc_flag |= NCF_ISMOUNTPT; 415 cache_ismounting(mp); 416 417 mountlist_insert(mp, MNTINS_LAST); 418 vn_unlock(vp); 419 checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt); 420 error = vfs_allocate_syncvnode(mp); 421 lwkt_reltoken(&mp->mnt_token); 422 vfs_unbusy(mp); 423 error = VFS_START(mp, 0); 424 vrele(vp); 425 } else { 426 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 427 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 428 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 429 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 430 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 431 mp->mnt_vfc->vfc_refcount--; 432 lwkt_reltoken(&mp->mnt_token); 433 vfs_unbusy(mp); 434 kfree(mp, M_MOUNT); 435 cache_drop(&nch); 436 vput(vp); 437 } 438 done: 439 return (error); 440 } 441 442 /* 443 * Scan all active processes to see if any of them have a current 444 * or root directory onto which the new filesystem has just been 445 * mounted. 
 * If so, replace them with the new mount point.
 *
 * Both old_nch and new_nch are ref'd on call but not locked.
 * new_nch must be temporarily locked so it can be associated with the
 * vnode representing the root of the mount point.
 */
struct checkdirs_info {
	struct nchandle old_nch;
	struct nchandle new_nch;
	struct vnode *old_vp;
	struct vnode *new_vp;
};

static int checkdirs_callback(struct proc *p, void *data);

static void
checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
{
	struct checkdirs_info info;
	struct vnode *olddp;
	struct vnode *newdp;
	struct mount *mp;

	/*
	 * If the old mount point's vnode has a usecount of 1, it is not
	 * being held as a descriptor anywhere.
	 */
	olddp = old_nch->ncp->nc_vp;
	if (olddp == NULL || olddp->v_sysref.refcnt == 1)
		return;

	/*
	 * Force the root vnode of the new mount point to be resolved
	 * so we can update any matching processes.
	 */
	mp = new_nch->mount;
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	vn_unlock(newdp);
	cache_lock(new_nch);
	vn_lock(newdp, LK_EXCLUSIVE | LK_RETRY);
	cache_setunresolved(new_nch);
	cache_setvp(new_nch, newdp);
	cache_unlock(new_nch);

	/*
	 * Special handling of the root node
	 */
	if (rootvnode == olddp) {
		vref(newdp);
		vfs_cache_setroot(newdp, cache_hold(new_nch));
	}

	/*
	 * Pass newdp separately so the callback does not have to access
	 * it via new_nch->ncp->nc_vp.
	 */
	info.old_nch = *old_nch;
	info.new_nch = *new_nch;
	info.new_vp = newdp;
	allproc_scan(checkdirs_callback, &info);
	vput(newdp);
}

/*
 * NOTE: callback is not MP safe because the scanned process's filedesc
 * structure can be ripped out from under us, among other things.
 */
static int
checkdirs_callback(struct proc *p, void *data)
{
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);
		vprele1 = NULL;
		vprele2 = NULL;

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 * foreign process.
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock(&fdp->fd_spin);
		if (fdp->fd_ncdir.mount == info->old_nch.mount &&
		    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
			vprele1 = fdp->fd_cdir;
			vref(info->new_vp);
			fdp->fd_cdir = info->new_vp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->new_nch, &fdp->fd_ncdir);
		}
		if (fdp->fd_nrdir.mount == info->old_nch.mount &&
		    fdp->fd_nrdir.ncp == info->old_nch.ncp) {
			vprele2 = fdp->fd_rdir;
			vref(info->new_vp);
			fdp->fd_rdir = info->new_vp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->new_nch, &fdp->fd_nrdir);
		}
		spin_unlock(&fdp->fd_spin);
		if (ncdrop1.ncp)
			cache_drop(&ncdrop1);
		if (ncdrop2.ncp)
			cache_drop(&ncdrop2);
		if (vprele1)
			vrele(vprele1);
		if (vprele2)
			vrele(vprele2);
	}
	return(0);
}

/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not the special file (as before).
 *
 * umount_args(char *path, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_unmount(struct unmount_args *uap)
{
	struct thread *td = curthread;
	struct proc *p __debugvar = td->td_proc;
	struct mount *mp = NULL;
	struct nlookupdata nd;
	int error;

	KKASSERT(p);
	get_mplock();
	if (td->td_ucred->cr_prison != NULL) {
		error = EPERM;
		goto done;
	}
	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))
		goto done;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error)
		goto out;

	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != td->td_ucred->cr_uid) &&
	    (error = priv_check(td, PRIV_ROOT)))
		goto out;

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {
		error = EINVAL;
		goto out;
	}

out:
	nlookup_done(&nd);
	if (error == 0)
		error = dounmount(mp, uap->flags);
done:
	rel_mplock();
	return (error);
}
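/*
 * Illustrative sketch (not part of this file): from userland the path-based
 * unmount above is normally reached via the unmount(2) system call, subject
 * to the vfs.usermount sysctl and ownership checks performed here.  The
 * mount point path below is hypothetical.
 *
 *	if (unmount("/mnt/example", MNT_FORCE) < 0)
 *		err(1, "unmount");
 */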
/*
 * Do the actual file system unmount.
 */
static int
dounmount_interlock(struct mount *mp)
{
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
		return (EBUSY);
	mp->mnt_kern_flag |= MNTK_UNMOUNT;
	return(0);
}

static int
unmount_allproc_cb(struct proc *p, void *arg)
{
	struct mount *mp;

	if (p->p_textnch.ncp == NULL)
		return 0;

	mp = (struct mount *)arg;
	if (p->p_textnch.mount == mp)
		cache_drop(&p->p_textnch);

	return 0;
}
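/*
 * Illustrative sketch (not part of this file, hypothetical caller): kernel
 * code that needs to tear a filesystem down directly, rather than going
 * through sys_unmount(), would invoke dounmount() itself, e.g.:
 *
 *	error = dounmount(mp, MNT_FORCE);
 */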
int
dounmount(struct mount *mp, int flags)
{
	struct namecache *ncp;
	struct nchandle nch;
	struct vnode *vp;
	int error;
	int async_flag;
	int lflags;
	int freeok = 1;
	int retry;

	lwkt_gettoken(&mp->mnt_token);
	/*
	 * Exclusive access for unmounting purposes
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)
		goto out;

	/*
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_TIMELOCK);
	error = lockmgr(&mp->mnt_lock, lflags);
	if (error) {
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;
			wakeup(mp);
		}
		goto out;
	}

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;

	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwards.
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			allproc_scan(&unmount_allproc_cb, mp);
		}

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {

			if ((flags & MNT_FORCE) == 0) {
				error = EBUSY;
				mount_warning(mp, "Cannot unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
			} else {
				mount_warning(mp, "Forced unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
				freeok = 0;
			}
		}
	}

	/*
	 * Decommission our special mnt_syncer vnode.  This also stops
	 * the vnlru code.  If we are unable to unmount we recommission
	 * the vnode.
	 *
	 * Then sync the filesystem.
	 */
	if ((vp = mp->mnt_syncer) != NULL) {
		mp->mnt_syncer = NULL;
		vrele(vp);
	}
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		VFS_SYNC(mp, MNT_WAIT);

	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 *
	 * Scans can get temporary refs on a mountpoint (though really
	 * heavy duty stuff like cache_findmount() does not).
	 */
	for (retry = 0; retry < 10 && mp->mnt_refs != 1; ++retry) {
		cache_unmounting(mp);
		tsleep(&mp->mnt_refs, 0, "mntbsy", hz / 10 + 1);
	}
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
					  "%d mount refs still present",
					  mp->mnt_refs);
			error = EBUSY;
		} else {
			mount_warning(mp, "Forced unmount: "
					  "%d mount refs still present",
					  mp->mnt_refs);
			freeok = 0;
		}
	}

	/*
	 * So far so good, sync the filesystem once more and
	 * call the VFS unmount code if the sync succeeds.
	 */
	if (error == 0) {
		if (((mp->mnt_flag & MNT_RDONLY) ||
		     (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
		    (flags & MNT_FORCE)) {
			error = VFS_UNMOUNT(mp, flags);
		}
	}

	/*
	 * If an error occurred we can still recover, restoring the
	 * syncer vnode and misc flags.
	 */
	if (error) {
		if (mp->mnt_syncer == NULL)
			vfs_allocate_syncvnode(mp);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		lockmgr(&mp->mnt_lock, LK_RELEASE);
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;
			wakeup(mp);
		}
		goto out;
	}
	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 * have to.
819 */ 820 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 821 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 822 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 823 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 824 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 825 826 if (mp->mnt_ncmountpt.ncp != NULL) { 827 nch = mp->mnt_ncmountpt; 828 cache_zero(&mp->mnt_ncmountpt); 829 cache_clrmountpt(&nch); 830 cache_drop(&nch); 831 } 832 if (mp->mnt_ncmounton.ncp != NULL) { 833 cache_unmounting(mp); 834 nch = mp->mnt_ncmounton; 835 cache_zero(&mp->mnt_ncmounton); 836 cache_clrmountpt(&nch); 837 cache_drop(&nch); 838 } 839 840 mp->mnt_vfc->vfc_refcount--; 841 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) 842 panic("unmount: dangling vnode"); 843 lockmgr(&mp->mnt_lock, LK_RELEASE); 844 if (mp->mnt_kern_flag & MNTK_MWAIT) { 845 mp->mnt_kern_flag &= ~MNTK_MWAIT; 846 wakeup(mp); 847 } 848 849 /* 850 * If we reach here and freeok != 0 we must free the mount. 851 * If refs > 1 cycle and wait, just in case someone tried 852 * to busy the mount after we decided to do the unmount. 853 */ 854 if (freeok) { 855 while (mp->mnt_refs > 1) { 856 cache_unmounting(mp); 857 wakeup(mp); 858 tsleep(&mp->mnt_refs, 0, "umntrwait", hz / 10 + 1); 859 } 860 lwkt_reltoken(&mp->mnt_token); 861 kfree(mp, M_MOUNT); 862 mp = NULL; 863 } 864 error = 0; 865 out: 866 if (mp) 867 lwkt_reltoken(&mp->mnt_token); 868 return (error); 869 } 870 871 static 872 void 873 mount_warning(struct mount *mp, const char *ctl, ...) 874 { 875 char *ptr; 876 char *buf; 877 __va_list va; 878 879 __va_start(va, ctl); 880 if (cache_fullpath(NULL, &mp->mnt_ncmounton, NULL, 881 &ptr, &buf, 0) == 0) { 882 kprintf("unmount(%s): ", ptr); 883 kvprintf(ctl, va); 884 kprintf("\n"); 885 kfree(buf, M_TEMP); 886 } else { 887 kprintf("unmount(%p", mp); 888 if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name) 889 kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name); 890 kprintf("): "); 891 kvprintf(ctl, va); 892 kprintf("\n"); 893 } 894 __va_end(va); 895 } 896 897 /* 898 * Shim cache_fullpath() to handle the case where a process is chrooted into 899 * a subdirectory of a mount. In this case if the root mount matches the 900 * process root directory's mount we have to specify the process's root 901 * directory instead of the mount point, because the mount point might 902 * be above the root directory. 903 */ 904 static 905 int 906 mount_path(struct proc *p, struct mount *mp, char **rb, char **fb) 907 { 908 struct nchandle *nch; 909 910 if (p && p->p_fd->fd_nrdir.mount == mp) 911 nch = &p->p_fd->fd_nrdir; 912 else 913 nch = &mp->mnt_ncmountpt; 914 return(cache_fullpath(p, nch, NULL, rb, fb, 0)); 915 } 916 917 /* 918 * Sync each mounted filesystem. 919 */ 920 921 #ifdef DEBUG 922 static int syncprt = 0; 923 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, ""); 924 #endif /* DEBUG */ 925 926 static int sync_callback(struct mount *mp, void *data); 927 928 int 929 sys_sync(struct sync_args *uap) 930 { 931 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD); 932 #ifdef DEBUG 933 /* 934 * print out buffer pool stat information on each sync() call. 
935 */ 936 if (syncprt) 937 vfs_bufstats(); 938 #endif /* DEBUG */ 939 return (0); 940 } 941 942 static 943 int 944 sync_callback(struct mount *mp, void *data __unused) 945 { 946 int asyncflag; 947 948 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 949 asyncflag = mp->mnt_flag & MNT_ASYNC; 950 mp->mnt_flag &= ~MNT_ASYNC; 951 vfs_msync(mp, MNT_NOWAIT); 952 VFS_SYNC(mp, MNT_NOWAIT); 953 mp->mnt_flag |= asyncflag; 954 } 955 return(0); 956 } 957 958 /* XXX PRISON: could be per prison flag */ 959 static int prison_quotas; 960 #if 0 961 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, ""); 962 #endif 963 964 /* 965 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg) 966 * 967 * Change filesystem quotas. 968 * 969 * MPALMOSTSAFE 970 */ 971 int 972 sys_quotactl(struct quotactl_args *uap) 973 { 974 struct nlookupdata nd; 975 struct thread *td; 976 struct mount *mp; 977 int error; 978 979 get_mplock(); 980 td = curthread; 981 if (td->td_ucred->cr_prison && !prison_quotas) { 982 error = EPERM; 983 goto done; 984 } 985 986 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 987 if (error == 0) 988 error = nlookup(&nd); 989 if (error == 0) { 990 mp = nd.nl_nch.mount; 991 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid, 992 uap->arg, nd.nl_cred); 993 } 994 nlookup_done(&nd); 995 done: 996 rel_mplock(); 997 return (error); 998 } 999 1000 /* 1001 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen, 1002 * void *buf, int buflen) 1003 * 1004 * This function operates on a mount point and executes the specified 1005 * operation using the specified control data, and possibly returns data. 1006 * 1007 * The actual number of bytes stored in the result buffer is returned, 0 1008 * if none, otherwise an error is returned. 1009 * 1010 * MPALMOSTSAFE 1011 */ 1012 int 1013 sys_mountctl(struct mountctl_args *uap) 1014 { 1015 struct thread *td = curthread; 1016 struct proc *p = td->td_proc; 1017 struct file *fp; 1018 void *ctl = NULL; 1019 void *buf = NULL; 1020 char *path = NULL; 1021 int error; 1022 1023 /* 1024 * Sanity and permissions checks. We must be root. 1025 */ 1026 KKASSERT(p); 1027 if (td->td_ucred->cr_prison != NULL) 1028 return (EPERM); 1029 if ((uap->op != MOUNTCTL_MOUNTFLAGS) && 1030 (error = priv_check(td, PRIV_ROOT)) != 0) 1031 return (error); 1032 1033 /* 1034 * Argument length checks 1035 */ 1036 if (uap->ctllen < 0 || uap->ctllen > 1024) 1037 return (EINVAL); 1038 if (uap->buflen < 0 || uap->buflen > 16 * 1024) 1039 return (EINVAL); 1040 if (uap->path == NULL) 1041 return (EINVAL); 1042 1043 /* 1044 * Allocate the necessary buffers and copyin data 1045 */ 1046 path = objcache_get(namei_oc, M_WAITOK); 1047 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 1048 if (error) 1049 goto done; 1050 1051 if (uap->ctllen) { 1052 ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO); 1053 error = copyin(uap->ctl, ctl, uap->ctllen); 1054 if (error) 1055 goto done; 1056 } 1057 if (uap->buflen) 1058 buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO); 1059 1060 /* 1061 * Validate the descriptor 1062 */ 1063 if (uap->fd >= 0) { 1064 fp = holdfp(p->p_fd, uap->fd, -1); 1065 if (fp == NULL) { 1066 error = EBADF; 1067 goto done; 1068 } 1069 } else { 1070 fp = NULL; 1071 } 1072 1073 /* 1074 * Execute the internal kernel function and clean up. 
1075 */ 1076 get_mplock(); 1077 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result); 1078 rel_mplock(); 1079 if (fp) 1080 fdrop(fp); 1081 if (error == 0 && uap->sysmsg_result > 0) 1082 error = copyout(buf, uap->buf, uap->sysmsg_result); 1083 done: 1084 if (path) 1085 objcache_put(namei_oc, path); 1086 if (ctl) 1087 kfree(ctl, M_TEMP); 1088 if (buf) 1089 kfree(buf, M_TEMP); 1090 return (error); 1091 } 1092 1093 /* 1094 * Execute a mount control operation by resolving the path to a mount point 1095 * and calling vop_mountctl(). 1096 * 1097 * Use the mount point from the nch instead of the vnode so nullfs mounts 1098 * can properly spike the VOP. 1099 */ 1100 int 1101 kern_mountctl(const char *path, int op, struct file *fp, 1102 const void *ctl, int ctllen, 1103 void *buf, int buflen, int *res) 1104 { 1105 struct vnode *vp; 1106 struct mount *mp; 1107 struct nlookupdata nd; 1108 int error; 1109 1110 *res = 0; 1111 vp = NULL; 1112 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW); 1113 if (error == 0) 1114 error = nlookup(&nd); 1115 if (error == 0) 1116 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 1117 mp = nd.nl_nch.mount; 1118 nlookup_done(&nd); 1119 if (error) 1120 return (error); 1121 vn_unlock(vp); 1122 1123 /* 1124 * Must be the root of the filesystem 1125 */ 1126 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 1127 vrele(vp); 1128 return (EINVAL); 1129 } 1130 error = vop_mountctl(mp->mnt_vn_use_ops, vp, op, fp, ctl, ctllen, 1131 buf, buflen, res); 1132 vrele(vp); 1133 return (error); 1134 } 1135 1136 int 1137 kern_statfs(struct nlookupdata *nd, struct statfs *buf) 1138 { 1139 struct thread *td = curthread; 1140 struct proc *p = td->td_proc; 1141 struct mount *mp; 1142 struct statfs *sp; 1143 char *fullpath, *freepath; 1144 int error; 1145 1146 if ((error = nlookup(nd)) != 0) 1147 return (error); 1148 mp = nd->nl_nch.mount; 1149 sp = &mp->mnt_stat; 1150 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0) 1151 return (error); 1152 1153 error = mount_path(p, mp, &fullpath, &freepath); 1154 if (error) 1155 return(error); 1156 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1157 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1158 kfree(freepath, M_TEMP); 1159 1160 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1161 bcopy(sp, buf, sizeof(*buf)); 1162 /* Only root should have access to the fsid's. */ 1163 if (priv_check(td, PRIV_ROOT)) 1164 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1165 return (0); 1166 } 1167 1168 /* 1169 * statfs_args(char *path, struct statfs *buf) 1170 * 1171 * Get filesystem statistics. 
 */
int
sys_statfs(struct statfs_args *uap)
{
	struct nlookupdata nd;
	struct statfs buf;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_statfs(&nd, &buf);
	nlookup_done(&nd);
	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}

int
kern_fstatfs(int fd, struct statfs *buf)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	char *fullpath, *freepath;
	int error;

	KKASSERT(p);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);

	/*
	 * Try to use mount info from any overlays rather than the
	 * mount info for the underlying vnode, otherwise we will
	 * fail when operating on null-mounted paths inside a chroot.
	 */
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (mp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_cred == NULL) {
		error = EINVAL;
		goto done;
	}
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
		goto done;

	if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0)
		goto done;
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));

	/* Only root should have access to the fsid's. */
	if (priv_check(td, PRIV_ROOT))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
	error = 0;
done:
	fdrop(fp);
	return (error);
}

/*
 * fstatfs_args(int fd, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
int
sys_fstatfs(struct fstatfs_args *uap)
{
	struct statfs buf;
	int error;

	error = kern_fstatfs(uap->fd, &buf);

	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}

int
kern_statvfs(struct nlookupdata *nd, struct statvfs *buf)
{
	struct mount *mp;
	struct statvfs *sp;
	int error;

	if ((error = nlookup(nd)) != 0)
		return (error);
	mp = nd->nl_nch.mount;
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, nd->nl_cred)) != 0)
		return (error);

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;
	bcopy(sp, buf, sizeof(*buf));
	return (0);
}

/*
 * statvfs_args(char *path, struct statvfs *buf)
 *
 * Get filesystem statistics.
 */
int
sys_statvfs(struct statvfs_args *uap)
{
	struct nlookupdata nd;
	struct statvfs buf;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_statvfs(&nd, &buf);
	nlookup_done(&nd);
	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}
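/*
 * Illustrative sketch (not part of this file): the statvfs path above is
 * what a userland statvfs(3) call ultimately reaches.  The mount point path
 * below is hypothetical.
 *
 *	struct statvfs sv;
 *
 *	if (statvfs("/usr", &sv) == 0)
 *		printf("%lu blocks free\n", (unsigned long)sv.f_bfree);
 */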
int
kern_fstatvfs(int fd, struct statvfs *buf)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct mount *mp;
	struct statvfs *sp;
	int error;

	KKASSERT(p);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (mp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_cred == NULL) {
		error = EINVAL;
		goto done;
	}
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, fp->f_cred)) != 0)
		goto done;

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;

	bcopy(sp, buf, sizeof(*buf));
	error = 0;
done:
	fdrop(fp);
	return (error);
}

/*
 * fstatvfs_args(int fd, struct statvfs *buf)
 *
 * Get filesystem statistics.
 */
int
sys_fstatvfs(struct fstatvfs_args *uap)
{
	struct statvfs buf;
	int error;

	error = kern_fstatvfs(uap->fd, &buf);

	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}

/*
 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
 *
 * Get statistics on all filesystems.
 */

struct getfsstat_info {
	struct statfs *sfsp;
	long count;
	long maxcount;
	int error;
	int flags;
	struct thread *td;
};

static int getfsstat_callback(struct mount *, void *);

int
sys_getfsstat(struct getfsstat_args *uap)
{
	struct thread *td = curthread;
	struct getfsstat_info info;

	bzero(&info, sizeof(info));

	info.maxcount = uap->bufsize / sizeof(struct statfs);
	info.sfsp = uap->buf;
	info.count = 0;
	info.flags = uap->flags;
	info.td = td;

	mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
	if (info.sfsp && info.count > info.maxcount)
		uap->sysmsg_result = info.maxcount;
	else
		uap->sysmsg_result = info.count;
	return (info.error);
}

static int
getfsstat_callback(struct mount *mp, void *data)
{
	struct getfsstat_info *info = data;
	struct statfs *sp;
	char *freepath;
	char *fullpath;
	int error;

	if (info->sfsp && info->count < info->maxcount) {
		if (info->td->td_proc &&
		    !chroot_visible_mnt(mp, info->td->td_proc)) {
			return(0);
		}
		sp = &mp->mnt_stat;

		/*
		 * If MNT_NOWAIT or MNT_LAZY is specified, do not
		 * refresh the fsstat cache.  MNT_NOWAIT or MNT_LAZY
		 * overrides MNT_WAIT.
1420 */ 1421 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1422 (info->flags & MNT_WAIT)) && 1423 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1424 return(0); 1425 } 1426 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1427 1428 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1429 if (error) { 1430 info->error = error; 1431 return(-1); 1432 } 1433 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1434 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1435 kfree(freepath, M_TEMP); 1436 1437 error = copyout(sp, info->sfsp, sizeof(*sp)); 1438 if (error) { 1439 info->error = error; 1440 return (-1); 1441 } 1442 ++info->sfsp; 1443 } 1444 info->count++; 1445 return(0); 1446 } 1447 1448 /* 1449 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf, 1450 long bufsize, int flags) 1451 * 1452 * Get statistics on all filesystems. 1453 */ 1454 1455 struct getvfsstat_info { 1456 struct statfs *sfsp; 1457 struct statvfs *vsfsp; 1458 long count; 1459 long maxcount; 1460 int error; 1461 int flags; 1462 struct thread *td; 1463 }; 1464 1465 static int getvfsstat_callback(struct mount *, void *); 1466 1467 int 1468 sys_getvfsstat(struct getvfsstat_args *uap) 1469 { 1470 struct thread *td = curthread; 1471 struct getvfsstat_info info; 1472 1473 bzero(&info, sizeof(info)); 1474 1475 info.maxcount = uap->vbufsize / sizeof(struct statvfs); 1476 info.sfsp = uap->buf; 1477 info.vsfsp = uap->vbuf; 1478 info.count = 0; 1479 info.flags = uap->flags; 1480 info.td = td; 1481 1482 mountlist_scan(getvfsstat_callback, &info, MNTSCAN_FORWARD); 1483 if (info.vsfsp && info.count > info.maxcount) 1484 uap->sysmsg_result = info.maxcount; 1485 else 1486 uap->sysmsg_result = info.count; 1487 return (info.error); 1488 } 1489 1490 static int 1491 getvfsstat_callback(struct mount *mp, void *data) 1492 { 1493 struct getvfsstat_info *info = data; 1494 struct statfs *sp; 1495 struct statvfs *vsp; 1496 char *freepath; 1497 char *fullpath; 1498 int error; 1499 1500 if (info->vsfsp && info->count < info->maxcount) { 1501 if (info->td->td_proc && 1502 !chroot_visible_mnt(mp, info->td->td_proc)) { 1503 return(0); 1504 } 1505 sp = &mp->mnt_stat; 1506 vsp = &mp->mnt_vstat; 1507 1508 /* 1509 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1510 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1511 * overrides MNT_WAIT. 
1512 */ 1513 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1514 (info->flags & MNT_WAIT)) && 1515 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1516 return(0); 1517 } 1518 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1519 1520 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1521 (info->flags & MNT_WAIT)) && 1522 (error = VFS_STATVFS(mp, vsp, info->td->td_ucred))) { 1523 return(0); 1524 } 1525 vsp->f_flag = 0; 1526 if (mp->mnt_flag & MNT_RDONLY) 1527 vsp->f_flag |= ST_RDONLY; 1528 if (mp->mnt_flag & MNT_NOSUID) 1529 vsp->f_flag |= ST_NOSUID; 1530 1531 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1532 if (error) { 1533 info->error = error; 1534 return(-1); 1535 } 1536 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1537 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1538 kfree(freepath, M_TEMP); 1539 1540 error = copyout(sp, info->sfsp, sizeof(*sp)); 1541 if (error == 0) 1542 error = copyout(vsp, info->vsfsp, sizeof(*vsp)); 1543 if (error) { 1544 info->error = error; 1545 return (-1); 1546 } 1547 ++info->sfsp; 1548 ++info->vsfsp; 1549 } 1550 info->count++; 1551 return(0); 1552 } 1553 1554 1555 /* 1556 * fchdir_args(int fd) 1557 * 1558 * Change current working directory to a given file descriptor. 1559 */ 1560 int 1561 sys_fchdir(struct fchdir_args *uap) 1562 { 1563 struct thread *td = curthread; 1564 struct proc *p = td->td_proc; 1565 struct filedesc *fdp = p->p_fd; 1566 struct vnode *vp, *ovp; 1567 struct mount *mp; 1568 struct file *fp; 1569 struct nchandle nch, onch, tnch; 1570 int error; 1571 1572 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0) 1573 return (error); 1574 lwkt_gettoken(&p->p_token); 1575 vp = (struct vnode *)fp->f_data; 1576 vref(vp); 1577 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1578 if (fp->f_nchandle.ncp == NULL) 1579 error = ENOTDIR; 1580 else 1581 error = checkvp_chdir(vp, td); 1582 if (error) { 1583 vput(vp); 1584 goto done; 1585 } 1586 cache_copy(&fp->f_nchandle, &nch); 1587 1588 /* 1589 * If the ncp has become a mount point, traverse through 1590 * the mount point. 
1591 */ 1592 1593 while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) && 1594 (mp = cache_findmount(&nch)) != NULL 1595 ) { 1596 error = nlookup_mp(mp, &tnch); 1597 if (error == 0) { 1598 cache_unlock(&tnch); /* leave ref intact */ 1599 vput(vp); 1600 vp = tnch.ncp->nc_vp; 1601 error = vget(vp, LK_SHARED); 1602 KKASSERT(error == 0); 1603 cache_drop(&nch); 1604 nch = tnch; 1605 } 1606 cache_dropmount(mp); 1607 } 1608 if (error == 0) { 1609 ovp = fdp->fd_cdir; 1610 onch = fdp->fd_ncdir; 1611 vn_unlock(vp); /* leave ref intact */ 1612 fdp->fd_cdir = vp; 1613 fdp->fd_ncdir = nch; 1614 cache_drop(&onch); 1615 vrele(ovp); 1616 } else { 1617 cache_drop(&nch); 1618 vput(vp); 1619 } 1620 fdrop(fp); 1621 done: 1622 lwkt_reltoken(&p->p_token); 1623 return (error); 1624 } 1625 1626 int 1627 kern_chdir(struct nlookupdata *nd) 1628 { 1629 struct thread *td = curthread; 1630 struct proc *p = td->td_proc; 1631 struct filedesc *fdp = p->p_fd; 1632 struct vnode *vp, *ovp; 1633 struct nchandle onch; 1634 int error; 1635 1636 if ((error = nlookup(nd)) != 0) 1637 return (error); 1638 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 1639 return (ENOENT); 1640 if ((error = vget(vp, LK_SHARED)) != 0) 1641 return (error); 1642 1643 lwkt_gettoken(&p->p_token); 1644 error = checkvp_chdir(vp, td); 1645 vn_unlock(vp); 1646 if (error == 0) { 1647 ovp = fdp->fd_cdir; 1648 onch = fdp->fd_ncdir; 1649 cache_unlock(&nd->nl_nch); /* leave reference intact */ 1650 fdp->fd_ncdir = nd->nl_nch; 1651 fdp->fd_cdir = vp; 1652 cache_drop(&onch); 1653 vrele(ovp); 1654 cache_zero(&nd->nl_nch); 1655 } else { 1656 vrele(vp); 1657 } 1658 lwkt_reltoken(&p->p_token); 1659 return (error); 1660 } 1661 1662 /* 1663 * chdir_args(char *path) 1664 * 1665 * Change current working directory (``.''). 1666 */ 1667 int 1668 sys_chdir(struct chdir_args *uap) 1669 { 1670 struct nlookupdata nd; 1671 int error; 1672 1673 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1674 if (error == 0) 1675 error = kern_chdir(&nd); 1676 nlookup_done(&nd); 1677 return (error); 1678 } 1679 1680 /* 1681 * Helper function for raised chroot(2) security function: Refuse if 1682 * any filedescriptors are open directories. 1683 */ 1684 static int 1685 chroot_refuse_vdir_fds(struct filedesc *fdp) 1686 { 1687 struct vnode *vp; 1688 struct file *fp; 1689 int error; 1690 int fd; 1691 1692 for (fd = 0; fd < fdp->fd_nfiles ; fd++) { 1693 if ((error = holdvnode(fdp, fd, &fp)) != 0) 1694 continue; 1695 vp = (struct vnode *)fp->f_data; 1696 if (vp->v_type != VDIR) { 1697 fdrop(fp); 1698 continue; 1699 } 1700 fdrop(fp); 1701 return(EPERM); 1702 } 1703 return (0); 1704 } 1705 1706 /* 1707 * This sysctl determines if we will allow a process to chroot(2) if it 1708 * has a directory open: 1709 * 0: disallowed for all processes. 1710 * 1: allowed for processes that were not already chroot(2)'ed. 1711 * 2: allowed for all processes. 1712 */ 1713 1714 static int chroot_allow_open_directories = 1; 1715 1716 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW, 1717 &chroot_allow_open_directories, 0, ""); 1718 1719 /* 1720 * chroot to the specified namecache entry. We obtain the vp from the 1721 * namecache data. The passed ncp must be locked and referenced and will 1722 * remain locked and referenced on return. 
1723 */ 1724 int 1725 kern_chroot(struct nchandle *nch) 1726 { 1727 struct thread *td = curthread; 1728 struct proc *p = td->td_proc; 1729 struct filedesc *fdp = p->p_fd; 1730 struct vnode *vp; 1731 int error; 1732 1733 /* 1734 * Only privileged user can chroot 1735 */ 1736 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1737 if (error) 1738 return (error); 1739 1740 /* 1741 * Disallow open directory descriptors (fchdir() breakouts). 1742 */ 1743 if (chroot_allow_open_directories == 0 || 1744 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) { 1745 if ((error = chroot_refuse_vdir_fds(fdp)) != 0) 1746 return (error); 1747 } 1748 if ((vp = nch->ncp->nc_vp) == NULL) 1749 return (ENOENT); 1750 1751 if ((error = vget(vp, LK_SHARED)) != 0) 1752 return (error); 1753 1754 /* 1755 * Check the validity of vp as a directory to change to and 1756 * associate it with rdir/jdir. 1757 */ 1758 error = checkvp_chdir(vp, td); 1759 vn_unlock(vp); /* leave reference intact */ 1760 if (error == 0) { 1761 vrele(fdp->fd_rdir); 1762 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */ 1763 cache_drop(&fdp->fd_nrdir); 1764 cache_copy(nch, &fdp->fd_nrdir); 1765 if (fdp->fd_jdir == NULL) { 1766 fdp->fd_jdir = vp; 1767 vref(fdp->fd_jdir); 1768 cache_copy(nch, &fdp->fd_njdir); 1769 } 1770 } else { 1771 vrele(vp); 1772 } 1773 return (error); 1774 } 1775 1776 /* 1777 * chroot_args(char *path) 1778 * 1779 * Change notion of root (``/'') directory. 1780 */ 1781 int 1782 sys_chroot(struct chroot_args *uap) 1783 { 1784 struct thread *td __debugvar = curthread; 1785 struct nlookupdata nd; 1786 int error; 1787 1788 KKASSERT(td->td_proc); 1789 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1790 if (error == 0) { 1791 nd.nl_flags |= NLC_EXEC; 1792 error = nlookup(&nd); 1793 if (error == 0) 1794 error = kern_chroot(&nd.nl_nch); 1795 } 1796 nlookup_done(&nd); 1797 return(error); 1798 } 1799 1800 int 1801 sys_chroot_kernel(struct chroot_kernel_args *uap) 1802 { 1803 struct thread *td = curthread; 1804 struct nlookupdata nd; 1805 struct nchandle *nch; 1806 struct vnode *vp; 1807 int error; 1808 1809 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1810 if (error) 1811 goto error_nond; 1812 1813 error = nlookup(&nd); 1814 if (error) 1815 goto error_out; 1816 1817 nch = &nd.nl_nch; 1818 1819 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1820 if (error) 1821 goto error_out; 1822 1823 if ((vp = nch->ncp->nc_vp) == NULL) { 1824 error = ENOENT; 1825 goto error_out; 1826 } 1827 1828 if ((error = cache_vref(nch, nd.nl_cred, &vp)) != 0) 1829 goto error_out; 1830 1831 kprintf("chroot_kernel: set new rootnch/rootvnode to %s\n", uap->path); 1832 get_mplock(); 1833 vfs_cache_setroot(vp, cache_hold(nch)); 1834 rel_mplock(); 1835 1836 error_out: 1837 nlookup_done(&nd); 1838 error_nond: 1839 return(error); 1840 } 1841 1842 /* 1843 * Common routine for chroot and chdir. Given a locked, referenced vnode, 1844 * determine whether it is legal to chdir to the vnode. The vnode's state 1845 * is not changed by this call. 
 */
int
checkvp_chdir(struct vnode *vp, struct thread *td)
{
	int error;

	if (vp->v_type != VDIR)
		error = ENOTDIR;
	else
		error = VOP_EACCESS(vp, VEXEC, td->td_ucred);
	return (error);
}

int
kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct filedesc *fdp = p->p_fd;
	int cmode, flags;
	struct file *nfp;
	struct file *fp;
	struct vnode *vp;
	int type, indx, error = 0;
	struct flock lf;

	if ((oflags & O_ACCMODE) == O_ACCMODE)
		return (EINVAL);
	flags = FFLAGS(oflags);
	error = falloc(lp, &nfp, NULL);
	if (error)
		return (error);
	fp = nfp;
	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;

	/*
	 * XXX p_dupfd is a real mess.  It allows a device to return a
	 * file descriptor to be duplicated rather than doing the open
	 * itself.
	 */
	lp->lwp_dupfd = -1;

	/*
	 * Call vn_open() to do the lookup and assign the vnode to the
	 * file pointer.  vn_open() does not change the ref count on fp
	 * and the vnode, on success, will be inherited by the file pointer
	 * and unlocked.
	 */
	nd->nl_flags |= NLC_LOCKVP;
	error = vn_open(nd, fp, flags, cmode);
	nlookup_done(nd);
	if (error) {
		/*
		 * handle special fdopen() case.  bleh.  dupfdopen() is
		 * responsible for dropping the old contents of ofiles[indx]
		 * if it succeeds.
		 *
		 * Note that fsetfd() will add a ref to fp which represents
		 * the fd_files[] assignment.  We must still drop our
		 * reference.
		 */
		if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
			if (fdalloc(p, 0, &indx) == 0) {
				error = dupfdopen(fdp, indx, lp->lwp_dupfd, flags, error);
				if (error == 0) {
					*res = indx;
					fdrop(fp);	/* our ref */
					return (0);
				}
				fsetfd(fdp, NULL, indx);
			}
		}
		fdrop(fp);	/* our ref */
		if (error == ERESTART)
			error = EINTR;
		return (error);
	}

	/*
	 * ref the vnode for ourselves so it can't be ripped out from under
	 * us.  XXX need an ND flag to request that the vnode be returned
	 * anyway.
	 *
	 * Reserve a file descriptor but do not assign it until the open
	 * succeeds.
	 */
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	if ((error = fdalloc(p, 0, &indx)) != 0) {
		fdrop(fp);
		vrele(vp);
		return (error);
	}

	/*
	 * If no error occurs the vp will have been assigned to the file
	 * pointer.
	 */
	lp->lwp_dupfd = 0;

	if (flags & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (flags & FNONBLOCK)
			type = 0;
		else
			type = F_WAIT;

		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * lock request failed.  Clean up the reserved
			 * descriptor.
			 */
			vrele(vp);
			fsetfd(fdp, NULL, indx);
			fdrop(fp);
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
#if 0
	/*
	 * Assert that all regular file vnodes were created with an object.
1975 */ 1976 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 1977 ("open: regular file has no backing object after vn_open")); 1978 #endif 1979 1980 vrele(vp); 1981 1982 /* 1983 * release our private reference, leaving the one associated with the 1984 * descriptor table intact. 1985 */ 1986 fsetfd(fdp, fp, indx); 1987 fdrop(fp); 1988 *res = indx; 1989 if (oflags & O_CLOEXEC) 1990 error = fsetfdflags(fdp, *res, UF_EXCLOSE); 1991 return (error); 1992 } 1993 1994 /* 1995 * open_args(char *path, int flags, int mode) 1996 * 1997 * Check permissions, allocate an open file structure, 1998 * and call the device open routine if any. 1999 */ 2000 int 2001 sys_open(struct open_args *uap) 2002 { 2003 struct nlookupdata nd; 2004 int error; 2005 2006 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2007 if (error == 0) { 2008 error = kern_open(&nd, uap->flags, 2009 uap->mode, &uap->sysmsg_result); 2010 } 2011 nlookup_done(&nd); 2012 return (error); 2013 } 2014 2015 /* 2016 * openat_args(int fd, char *path, int flags, int mode) 2017 */ 2018 int 2019 sys_openat(struct openat_args *uap) 2020 { 2021 struct nlookupdata nd; 2022 int error; 2023 struct file *fp; 2024 2025 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2026 if (error == 0) { 2027 error = kern_open(&nd, uap->flags, uap->mode, 2028 &uap->sysmsg_result); 2029 } 2030 nlookup_done_at(&nd, fp); 2031 return (error); 2032 } 2033 2034 int 2035 kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor) 2036 { 2037 struct thread *td = curthread; 2038 struct proc *p = td->td_proc; 2039 struct vnode *vp; 2040 struct vattr vattr; 2041 int error; 2042 int whiteout = 0; 2043 2044 KKASSERT(p); 2045 2046 VATTR_NULL(&vattr); 2047 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2048 vattr.va_rmajor = rmajor; 2049 vattr.va_rminor = rminor; 2050 2051 switch (mode & S_IFMT) { 2052 case S_IFMT: /* used by badsect to flag bad sectors */ 2053 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_BAD, 0); 2054 vattr.va_type = VBAD; 2055 break; 2056 case S_IFCHR: 2057 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2058 vattr.va_type = VCHR; 2059 break; 2060 case S_IFBLK: 2061 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2062 vattr.va_type = VBLK; 2063 break; 2064 case S_IFWHT: 2065 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_WHT, 0); 2066 whiteout = 1; 2067 break; 2068 case S_IFDIR: /* special directories support for HAMMER */ 2069 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_DIR, 0); 2070 vattr.va_type = VDIR; 2071 break; 2072 default: 2073 error = EINVAL; 2074 break; 2075 } 2076 2077 if (error) 2078 return (error); 2079 2080 bwillinode(1); 2081 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2082 if ((error = nlookup(nd)) != 0) 2083 return (error); 2084 if (nd->nl_nch.ncp->nc_vp) 2085 return (EEXIST); 2086 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2087 return (error); 2088 2089 if (whiteout) { 2090 error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp, 2091 nd->nl_cred, NAMEI_CREATE); 2092 } else { 2093 vp = NULL; 2094 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, 2095 &vp, nd->nl_cred, &vattr); 2096 if (error == 0) 2097 vput(vp); 2098 } 2099 return (error); 2100 } 2101 2102 /* 2103 * mknod_args(char *path, int mode, int dev) 2104 * 2105 * Create a special file. 
2106 */ 2107 int 2108 sys_mknod(struct mknod_args *uap) 2109 { 2110 struct nlookupdata nd; 2111 int error; 2112 2113 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2114 if (error == 0) { 2115 error = kern_mknod(&nd, uap->mode, 2116 umajor(uap->dev), uminor(uap->dev)); 2117 } 2118 nlookup_done(&nd); 2119 return (error); 2120 } 2121 2122 /* 2123 * mknodat_args(int fd, char *path, mode_t mode, dev_t dev) 2124 * 2125 * Create a special file. The path is relative to the directory associated 2126 * with fd. 2127 */ 2128 int 2129 sys_mknodat(struct mknodat_args *uap) 2130 { 2131 struct nlookupdata nd; 2132 struct file *fp; 2133 int error; 2134 2135 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2136 if (error == 0) { 2137 error = kern_mknod(&nd, uap->mode, 2138 umajor(uap->dev), uminor(uap->dev)); 2139 } 2140 nlookup_done_at(&nd, fp); 2141 return (error); 2142 } 2143 2144 int 2145 kern_mkfifo(struct nlookupdata *nd, int mode) 2146 { 2147 struct thread *td = curthread; 2148 struct proc *p = td->td_proc; 2149 struct vattr vattr; 2150 struct vnode *vp; 2151 int error; 2152 2153 bwillinode(1); 2154 2155 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2156 if ((error = nlookup(nd)) != 0) 2157 return (error); 2158 if (nd->nl_nch.ncp->nc_vp) 2159 return (EEXIST); 2160 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2161 return (error); 2162 2163 VATTR_NULL(&vattr); 2164 vattr.va_type = VFIFO; 2165 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2166 vp = NULL; 2167 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr); 2168 if (error == 0) 2169 vput(vp); 2170 return (error); 2171 } 2172 2173 /* 2174 * mkfifo_args(char *path, int mode) 2175 * 2176 * Create a named pipe. 2177 */ 2178 int 2179 sys_mkfifo(struct mkfifo_args *uap) 2180 { 2181 struct nlookupdata nd; 2182 int error; 2183 2184 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2185 if (error == 0) 2186 error = kern_mkfifo(&nd, uap->mode); 2187 nlookup_done(&nd); 2188 return (error); 2189 } 2190 2191 /* 2192 * mkfifoat_args(int fd, char *path, mode_t mode) 2193 * 2194 * Create a named pipe. The path is relative to the directory associated 2195 * with fd. 2196 */ 2197 int 2198 sys_mkfifoat(struct mkfifoat_args *uap) 2199 { 2200 struct nlookupdata nd; 2201 struct file *fp; 2202 int error; 2203 2204 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2205 if (error == 0) 2206 error = kern_mkfifo(&nd, uap->mode); 2207 nlookup_done_at(&nd, fp); 2208 return (error); 2209 } 2210 2211 static int hardlink_check_uid = 0; 2212 SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW, 2213 &hardlink_check_uid, 0, 2214 "Unprivileged processes cannot create hard links to files owned by other " 2215 "users"); 2216 static int hardlink_check_gid = 0; 2217 SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW, 2218 &hardlink_check_gid, 0, 2219 "Unprivileged processes cannot create hard links to files owned by other " 2220 "groups"); 2221 2222 static int 2223 can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred) 2224 { 2225 struct vattr va; 2226 int error; 2227 2228 /* 2229 * Shortcut if disabled 2230 */ 2231 if (hardlink_check_uid == 0 && hardlink_check_gid == 0) 2232 return (0); 2233 2234 /* 2235 * Privileged user can always hardlink 2236 */ 2237 if (priv_check_cred(cred, PRIV_VFS_LINK, 0) == 0) 2238 return (0); 2239 2240 /* 2241 * Otherwise only if the originating file is owned by the 2242 * same user or group. 
Note that any group is allowed if 2243 * the file is owned by the caller. 2244 */ 2245 error = VOP_GETATTR(vp, &va); 2246 if (error != 0) 2247 return (error); 2248 2249 if (hardlink_check_uid) { 2250 if (cred->cr_uid != va.va_uid) 2251 return (EPERM); 2252 } 2253 2254 if (hardlink_check_gid) { 2255 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred)) 2256 return (EPERM); 2257 } 2258 2259 return (0); 2260 } 2261 2262 int 2263 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd) 2264 { 2265 struct thread *td = curthread; 2266 struct vnode *vp; 2267 int error; 2268 2269 /* 2270 * Lookup the source and obtained a locked vnode. 2271 * 2272 * You may only hardlink a file which you have write permission 2273 * on or which you own. 2274 * 2275 * XXX relookup on vget failure / race ? 2276 */ 2277 bwillinode(1); 2278 nd->nl_flags |= NLC_WRITE | NLC_OWN | NLC_HLINK; 2279 if ((error = nlookup(nd)) != 0) 2280 return (error); 2281 vp = nd->nl_nch.ncp->nc_vp; 2282 KKASSERT(vp != NULL); 2283 if (vp->v_type == VDIR) 2284 return (EPERM); /* POSIX */ 2285 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2286 return (error); 2287 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) 2288 return (error); 2289 2290 /* 2291 * Unlock the source so we can lookup the target without deadlocking 2292 * (XXX vp is locked already, possible other deadlock?). The target 2293 * must not exist. 2294 */ 2295 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED); 2296 nd->nl_flags &= ~NLC_NCPISLOCKED; 2297 cache_unlock(&nd->nl_nch); 2298 vn_unlock(vp); 2299 2300 linknd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2301 if ((error = nlookup(linknd)) != 0) { 2302 vrele(vp); 2303 return (error); 2304 } 2305 if (linknd->nl_nch.ncp->nc_vp) { 2306 vrele(vp); 2307 return (EEXIST); 2308 } 2309 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 2310 vrele(vp); 2311 return (error); 2312 } 2313 2314 /* 2315 * Finally run the new API VOP. 2316 */ 2317 error = can_hardlink(vp, td, td->td_ucred); 2318 if (error == 0) { 2319 error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp, 2320 vp, linknd->nl_cred); 2321 } 2322 vput(vp); 2323 return (error); 2324 } 2325 2326 /* 2327 * link_args(char *path, char *link) 2328 * 2329 * Make a hard file link. 2330 */ 2331 int 2332 sys_link(struct link_args *uap) 2333 { 2334 struct nlookupdata nd, linknd; 2335 int error; 2336 2337 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2338 if (error == 0) { 2339 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0); 2340 if (error == 0) 2341 error = kern_link(&nd, &linknd); 2342 nlookup_done(&linknd); 2343 } 2344 nlookup_done(&nd); 2345 return (error); 2346 } 2347 2348 /* 2349 * linkat_args(int fd1, char *path1, int fd2, char *path2, int flags) 2350 * 2351 * Make a hard file link. The path1 argument is relative to the directory 2352 * associated with fd1, and similarly the path2 argument is relative to 2353 * the directory associated with fd2. 2354 */ 2355 int 2356 sys_linkat(struct linkat_args *uap) 2357 { 2358 struct nlookupdata nd, linknd; 2359 struct file *fp1, *fp2; 2360 int error; 2361 2362 error = nlookup_init_at(&nd, &fp1, uap->fd1, uap->path1, UIO_USERSPACE, 2363 (uap->flags & AT_SYMLINK_FOLLOW) ? 
NLC_FOLLOW : 0); 2364 if (error == 0) { 2365 error = nlookup_init_at(&linknd, &fp2, uap->fd2, 2366 uap->path2, UIO_USERSPACE, 0); 2367 if (error == 0) 2368 error = kern_link(&nd, &linknd); 2369 nlookup_done_at(&linknd, fp2); 2370 } 2371 nlookup_done_at(&nd, fp1); 2372 return (error); 2373 } 2374 2375 int 2376 kern_symlink(struct nlookupdata *nd, char *path, int mode) 2377 { 2378 struct vattr vattr; 2379 struct vnode *vp; 2380 struct vnode *dvp; 2381 int error; 2382 2383 bwillinode(1); 2384 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2385 if ((error = nlookup(nd)) != 0) 2386 return (error); 2387 if (nd->nl_nch.ncp->nc_vp) 2388 return (EEXIST); 2389 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2390 return (error); 2391 dvp = nd->nl_dvp; 2392 VATTR_NULL(&vattr); 2393 vattr.va_mode = mode; 2394 error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path); 2395 if (error == 0) 2396 vput(vp); 2397 return (error); 2398 } 2399 2400 /* 2401 * symlink(char *path, char *link) 2402 * 2403 * Make a symbolic link. 2404 */ 2405 int 2406 sys_symlink(struct symlink_args *uap) 2407 { 2408 struct thread *td = curthread; 2409 struct nlookupdata nd; 2410 char *path; 2411 int error; 2412 int mode; 2413 2414 path = objcache_get(namei_oc, M_WAITOK); 2415 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 2416 if (error == 0) { 2417 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0); 2418 if (error == 0) { 2419 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2420 error = kern_symlink(&nd, path, mode); 2421 } 2422 nlookup_done(&nd); 2423 } 2424 objcache_put(namei_oc, path); 2425 return (error); 2426 } 2427 2428 /* 2429 * symlinkat_args(char *path1, int fd, char *path2) 2430 * 2431 * Make a symbolic link. The path2 argument is relative to the directory 2432 * associated with fd. 2433 */ 2434 int 2435 sys_symlinkat(struct symlinkat_args *uap) 2436 { 2437 struct thread *td = curthread; 2438 struct nlookupdata nd; 2439 struct file *fp; 2440 char *path1; 2441 int error; 2442 int mode; 2443 2444 path1 = objcache_get(namei_oc, M_WAITOK); 2445 error = copyinstr(uap->path1, path1, MAXPATHLEN, NULL); 2446 if (error == 0) { 2447 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path2, 2448 UIO_USERSPACE, 0); 2449 if (error == 0) { 2450 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2451 error = kern_symlink(&nd, path1, mode); 2452 } 2453 nlookup_done_at(&nd, fp); 2454 } 2455 objcache_put(namei_oc, path1); 2456 return (error); 2457 } 2458 2459 /* 2460 * undelete_args(char *path) 2461 * 2462 * Delete a whiteout from the filesystem. 2463 */ 2464 int 2465 sys_undelete(struct undelete_args *uap) 2466 { 2467 struct nlookupdata nd; 2468 int error; 2469 2470 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2471 bwillinode(1); 2472 nd.nl_flags |= NLC_DELETE | NLC_REFDVP; 2473 if (error == 0) 2474 error = nlookup(&nd); 2475 if (error == 0) 2476 error = ncp_writechk(&nd.nl_nch); 2477 if (error == 0) { 2478 error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred, 2479 NAMEI_DELETE); 2480 } 2481 nlookup_done(&nd); 2482 return (error); 2483 } 2484 2485 int 2486 kern_unlink(struct nlookupdata *nd) 2487 { 2488 int error; 2489 2490 bwillinode(1); 2491 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 2492 if ((error = nlookup(nd)) != 0) 2493 return (error); 2494 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2495 return (error); 2496 error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 2497 return (error); 2498 } 2499 2500 /* 2501 * unlink_args(char *path) 2502 * 2503 * Delete a name from the filesystem. 
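 *
 * Illustrative sketch (userland, hypothetical path): unlink("/tmp/scratch")
 * removes only the directory entry; kern_unlink() performs the NLC_DELETE
 * lookup and write check, then issues VOP_NREMOVE() on the parent dvp.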
2504 */ 2505 int 2506 sys_unlink(struct unlink_args *uap) 2507 { 2508 struct nlookupdata nd; 2509 int error; 2510 2511 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2512 if (error == 0) 2513 error = kern_unlink(&nd); 2514 nlookup_done(&nd); 2515 return (error); 2516 } 2517 2518 2519 /* 2520 * unlinkat_args(int fd, char *path, int flags) 2521 * 2522 * Delete the file or directory entry pointed to by fd/path. 2523 */ 2524 int 2525 sys_unlinkat(struct unlinkat_args *uap) 2526 { 2527 struct nlookupdata nd; 2528 struct file *fp; 2529 int error; 2530 2531 if (uap->flags & ~AT_REMOVEDIR) 2532 return (EINVAL); 2533 2534 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2535 if (error == 0) { 2536 if (uap->flags & AT_REMOVEDIR) 2537 error = kern_rmdir(&nd); 2538 else 2539 error = kern_unlink(&nd); 2540 } 2541 nlookup_done_at(&nd, fp); 2542 return (error); 2543 } 2544 2545 int 2546 kern_lseek(int fd, off_t offset, int whence, off_t *res) 2547 { 2548 struct thread *td = curthread; 2549 struct proc *p = td->td_proc; 2550 struct file *fp; 2551 struct vnode *vp; 2552 struct vattr vattr; 2553 off_t new_offset; 2554 int error; 2555 2556 fp = holdfp(p->p_fd, fd, -1); 2557 if (fp == NULL) 2558 return (EBADF); 2559 if (fp->f_type != DTYPE_VNODE) { 2560 error = ESPIPE; 2561 goto done; 2562 } 2563 vp = (struct vnode *)fp->f_data; 2564 2565 switch (whence) { 2566 case L_INCR: 2567 spin_lock(&fp->f_spin); 2568 new_offset = fp->f_offset + offset; 2569 error = 0; 2570 break; 2571 case L_XTND: 2572 error = VOP_GETATTR(vp, &vattr); 2573 spin_lock(&fp->f_spin); 2574 new_offset = offset + vattr.va_size; 2575 break; 2576 case L_SET: 2577 new_offset = offset; 2578 error = 0; 2579 spin_lock(&fp->f_spin); 2580 break; 2581 default: 2582 new_offset = 0; 2583 error = EINVAL; 2584 spin_lock(&fp->f_spin); 2585 break; 2586 } 2587 2588 /* 2589 * Validate the seek position. Negative offsets are not allowed 2590 * for regular files or directories. 2591 * 2592 * Normally we would also not want to allow negative offsets for 2593 * character and block-special devices. However kvm addresses 2594 * on 64 bit architectures might appear to be negative and must 2595 * be allowed. 2596 */ 2597 if (error == 0) { 2598 if (new_offset < 0 && 2599 (vp->v_type == VREG || vp->v_type == VDIR)) { 2600 error = EINVAL; 2601 } else { 2602 fp->f_offset = new_offset; 2603 } 2604 } 2605 *res = fp->f_offset; 2606 spin_unlock(&fp->f_spin); 2607 done: 2608 fdrop(fp); 2609 return (error); 2610 } 2611 2612 /* 2613 * lseek_args(int fd, int pad, off_t offset, int whence) 2614 * 2615 * Reposition read/write file offset. 2616 */ 2617 int 2618 sys_lseek(struct lseek_args *uap) 2619 { 2620 int error; 2621 2622 error = kern_lseek(uap->fd, uap->offset, uap->whence, 2623 &uap->sysmsg_offset); 2624 2625 return (error); 2626 } 2627 2628 /* 2629 * Check if current process can access given file. amode is a bitmask of *_OK 2630 * access bits. flags is a bitmask of AT_* flags. 2631 */ 2632 int 2633 kern_access(struct nlookupdata *nd, int amode, int flags) 2634 { 2635 struct vnode *vp; 2636 int error, mode; 2637 2638 if (flags & ~AT_EACCESS) 2639 return (EINVAL); 2640 if ((error = nlookup(nd)) != 0) 2641 return (error); 2642 retry: 2643 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2644 if (error) 2645 return (error); 2646 2647 /* Flags == 0 means only check for existence. 
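	 * (i.e. an F_OK style request).  Otherwise amode is a mask of
	 * R_OK/W_OK/X_OK bits which is translated below into VREAD/VWRITE/
	 * VEXEC for VOP_ACCESS_FLAGS(); e.g. an illustrative
	 * access("/etc/passwd", R_OK | W_OK) request becomes VREAD | VWRITE.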
*/ 2648 if (amode) { 2649 mode = 0; 2650 if (amode & R_OK) 2651 mode |= VREAD; 2652 if (amode & W_OK) 2653 mode |= VWRITE; 2654 if (amode & X_OK) 2655 mode |= VEXEC; 2656 if ((mode & VWRITE) == 0 || 2657 (error = vn_writechk(vp, &nd->nl_nch)) == 0) 2658 error = VOP_ACCESS_FLAGS(vp, mode, flags, nd->nl_cred); 2659 2660 /* 2661 * If the file handle is stale we have to re-resolve the 2662 * entry. This is a hack at the moment. 2663 */ 2664 if (error == ESTALE) { 2665 vput(vp); 2666 cache_setunresolved(&nd->nl_nch); 2667 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2668 if (error == 0) { 2669 vp = NULL; 2670 goto retry; 2671 } 2672 return(error); 2673 } 2674 } 2675 vput(vp); 2676 return (error); 2677 } 2678 2679 /* 2680 * access_args(char *path, int flags) 2681 * 2682 * Check access permissions. 2683 */ 2684 int 2685 sys_access(struct access_args *uap) 2686 { 2687 struct nlookupdata nd; 2688 int error; 2689 2690 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2691 if (error == 0) 2692 error = kern_access(&nd, uap->flags, 0); 2693 nlookup_done(&nd); 2694 return (error); 2695 } 2696 2697 2698 /* 2699 * eaccess_args(char *path, int flags) 2700 * 2701 * Check access permissions. 2702 */ 2703 int 2704 sys_eaccess(struct eaccess_args *uap) 2705 { 2706 struct nlookupdata nd; 2707 int error; 2708 2709 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2710 if (error == 0) 2711 error = kern_access(&nd, uap->flags, AT_EACCESS); 2712 nlookup_done(&nd); 2713 return (error); 2714 } 2715 2716 2717 /* 2718 * faccessat_args(int fd, char *path, int amode, int flags) 2719 * 2720 * Check access permissions. 2721 */ 2722 int 2723 sys_faccessat(struct faccessat_args *uap) 2724 { 2725 struct nlookupdata nd; 2726 struct file *fp; 2727 int error; 2728 2729 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 2730 NLC_FOLLOW); 2731 if (error == 0) 2732 error = kern_access(&nd, uap->amode, uap->flags); 2733 nlookup_done_at(&nd, fp); 2734 return (error); 2735 } 2736 2737 2738 int 2739 kern_stat(struct nlookupdata *nd, struct stat *st) 2740 { 2741 int error; 2742 struct vnode *vp; 2743 2744 if ((error = nlookup(nd)) != 0) 2745 return (error); 2746 again: 2747 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 2748 return (ENOENT); 2749 2750 if ((error = vget(vp, LK_SHARED)) != 0) 2751 return (error); 2752 error = vn_stat(vp, st, nd->nl_cred); 2753 2754 /* 2755 * If the file handle is stale we have to re-resolve the entry. This 2756 * is a hack at the moment. 2757 */ 2758 if (error == ESTALE) { 2759 vput(vp); 2760 cache_setunresolved(&nd->nl_nch); 2761 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2762 if (error == 0) 2763 goto again; 2764 } else { 2765 vput(vp); 2766 } 2767 return (error); 2768 } 2769 2770 /* 2771 * stat_args(char *path, struct stat *ub) 2772 * 2773 * Get file status; this version follows links. 2774 */ 2775 int 2776 sys_stat(struct stat_args *uap) 2777 { 2778 struct nlookupdata nd; 2779 struct stat st; 2780 int error; 2781 2782 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2783 if (error == 0) { 2784 error = kern_stat(&nd, &st); 2785 if (error == 0) 2786 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2787 } 2788 nlookup_done(&nd); 2789 return (error); 2790 } 2791 2792 /* 2793 * lstat_args(char *path, struct stat *ub) 2794 * 2795 * Get file status; this version does not follow links. 
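 *
 * Illustrative difference (hypothetical paths): if /tmp/link is a symlink
 * to /etc/passwd, stat("/tmp/link", &st) reports the target file while
 * lstat() reports the link itself (S_ISLNK(st.st_mode) is true).  The only
 * difference in this file is the NLC_FOLLOW flag passed to nlookup_init().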
2796 */ 2797 int 2798 sys_lstat(struct lstat_args *uap) 2799 { 2800 struct nlookupdata nd; 2801 struct stat st; 2802 int error; 2803 2804 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2805 if (error == 0) { 2806 error = kern_stat(&nd, &st); 2807 if (error == 0) 2808 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2809 } 2810 nlookup_done(&nd); 2811 return (error); 2812 } 2813 2814 /* 2815 * fstatat_args(int fd, char *path, struct stat *sb, int flags) 2816 * 2817 * Get status of file pointed to by fd/path. 2818 */ 2819 int 2820 sys_fstatat(struct fstatat_args *uap) 2821 { 2822 struct nlookupdata nd; 2823 struct stat st; 2824 int error; 2825 int flags; 2826 struct file *fp; 2827 2828 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 2829 return (EINVAL); 2830 2831 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 2832 2833 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 2834 UIO_USERSPACE, flags); 2835 if (error == 0) { 2836 error = kern_stat(&nd, &st); 2837 if (error == 0) 2838 error = copyout(&st, uap->sb, sizeof(*uap->sb)); 2839 } 2840 nlookup_done_at(&nd, fp); 2841 return (error); 2842 } 2843 2844 /* 2845 * pathconf_Args(char *path, int name) 2846 * 2847 * Get configurable pathname variables. 2848 */ 2849 int 2850 sys_pathconf(struct pathconf_args *uap) 2851 { 2852 struct nlookupdata nd; 2853 struct vnode *vp; 2854 int error; 2855 2856 vp = NULL; 2857 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2858 if (error == 0) 2859 error = nlookup(&nd); 2860 if (error == 0) 2861 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 2862 nlookup_done(&nd); 2863 if (error == 0) { 2864 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg); 2865 vput(vp); 2866 } 2867 return (error); 2868 } 2869 2870 /* 2871 * XXX: daver 2872 * kern_readlink isn't properly split yet. There is a copyin burried 2873 * in VOP_READLINK(). 2874 */ 2875 int 2876 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res) 2877 { 2878 struct thread *td = curthread; 2879 struct vnode *vp; 2880 struct iovec aiov; 2881 struct uio auio; 2882 int error; 2883 2884 if ((error = nlookup(nd)) != 0) 2885 return (error); 2886 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2887 if (error) 2888 return (error); 2889 if (vp->v_type != VLNK) { 2890 error = EINVAL; 2891 } else { 2892 aiov.iov_base = buf; 2893 aiov.iov_len = count; 2894 auio.uio_iov = &aiov; 2895 auio.uio_iovcnt = 1; 2896 auio.uio_offset = 0; 2897 auio.uio_rw = UIO_READ; 2898 auio.uio_segflg = UIO_USERSPACE; 2899 auio.uio_td = td; 2900 auio.uio_resid = count; 2901 error = VOP_READLINK(vp, &auio, td->td_ucred); 2902 } 2903 vput(vp); 2904 *res = count - auio.uio_resid; 2905 return (error); 2906 } 2907 2908 /* 2909 * readlink_args(char *path, char *buf, int count) 2910 * 2911 * Return target name of a symbolic link. 2912 */ 2913 int 2914 sys_readlink(struct readlink_args *uap) 2915 { 2916 struct nlookupdata nd; 2917 int error; 2918 2919 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2920 if (error == 0) { 2921 error = kern_readlink(&nd, uap->buf, uap->count, 2922 &uap->sysmsg_result); 2923 } 2924 nlookup_done(&nd); 2925 return (error); 2926 } 2927 2928 /* 2929 * readlinkat_args(int fd, char *path, char *buf, size_t bufsize) 2930 * 2931 * Return target name of a symbolic link. The path is relative to the 2932 * directory associated with fd. 
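 *
 * Illustrative sketch (userland, hypothetical path):
 *
 *	char buf[PATH_MAX];
 *	ssize_t n = readlink("/tmp/link", buf, sizeof(buf));
 *
 * The result in buf is not NUL terminated; the returned length is computed
 * by kern_readlink() as count - auio.uio_resid.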
2933 */ 2934 int 2935 sys_readlinkat(struct readlinkat_args *uap) 2936 { 2937 struct nlookupdata nd; 2938 struct file *fp; 2939 int error; 2940 2941 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2942 if (error == 0) { 2943 error = kern_readlink(&nd, uap->buf, uap->bufsize, 2944 &uap->sysmsg_result); 2945 } 2946 nlookup_done_at(&nd, fp); 2947 return (error); 2948 } 2949 2950 static int 2951 setfflags(struct vnode *vp, int flags) 2952 { 2953 struct thread *td = curthread; 2954 int error; 2955 struct vattr vattr; 2956 2957 /* 2958 * Prevent non-root users from setting flags on devices. When 2959 * a device is reused, users can retain ownership of the device 2960 * if they are allowed to set flags and programs assume that 2961 * chown can't fail when done as root. 2962 */ 2963 if ((vp->v_type == VCHR || vp->v_type == VBLK) && 2964 ((error = priv_check_cred(td->td_ucred, PRIV_VFS_CHFLAGS_DEV, 0)) != 0)) 2965 return (error); 2966 2967 /* 2968 * note: vget is required for any operation that might mod the vnode 2969 * so VINACTIVE is properly cleared. 2970 */ 2971 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2972 VATTR_NULL(&vattr); 2973 vattr.va_flags = flags; 2974 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2975 vput(vp); 2976 } 2977 return (error); 2978 } 2979 2980 /* 2981 * chflags(char *path, int flags) 2982 * 2983 * Change flags of a file given a path name. 2984 */ 2985 int 2986 sys_chflags(struct chflags_args *uap) 2987 { 2988 struct nlookupdata nd; 2989 struct vnode *vp; 2990 int error; 2991 2992 vp = NULL; 2993 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2994 if (error == 0) 2995 error = nlookup(&nd); 2996 if (error == 0) 2997 error = ncp_writechk(&nd.nl_nch); 2998 if (error == 0) 2999 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 3000 nlookup_done(&nd); 3001 if (error == 0) { 3002 error = setfflags(vp, uap->flags); 3003 vrele(vp); 3004 } 3005 return (error); 3006 } 3007 3008 /* 3009 * lchflags(char *path, int flags) 3010 * 3011 * Change flags of a file given a path name, but don't follow symlinks. 3012 */ 3013 int 3014 sys_lchflags(struct lchflags_args *uap) 3015 { 3016 struct nlookupdata nd; 3017 struct vnode *vp; 3018 int error; 3019 3020 vp = NULL; 3021 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3022 if (error == 0) 3023 error = nlookup(&nd); 3024 if (error == 0) 3025 error = ncp_writechk(&nd.nl_nch); 3026 if (error == 0) 3027 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 3028 nlookup_done(&nd); 3029 if (error == 0) { 3030 error = setfflags(vp, uap->flags); 3031 vrele(vp); 3032 } 3033 return (error); 3034 } 3035 3036 /* 3037 * fchflags_args(int fd, int flags) 3038 * 3039 * Change flags of a file given a file descriptor. 3040 */ 3041 int 3042 sys_fchflags(struct fchflags_args *uap) 3043 { 3044 struct thread *td = curthread; 3045 struct proc *p = td->td_proc; 3046 struct file *fp; 3047 int error; 3048 3049 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3050 return (error); 3051 if (fp->f_nchandle.ncp) 3052 error = ncp_writechk(&fp->f_nchandle); 3053 if (error == 0) 3054 error = setfflags((struct vnode *) fp->f_data, uap->flags); 3055 fdrop(fp); 3056 return (error); 3057 } 3058 3059 static int 3060 setfmode(struct vnode *vp, int mode) 3061 { 3062 struct thread *td = curthread; 3063 int error; 3064 struct vattr vattr; 3065 3066 /* 3067 * note: vget is required for any operation that might mod the vnode 3068 * so VINACTIVE is properly cleared. 
3069 */ 3070 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3071 VATTR_NULL(&vattr); 3072 vattr.va_mode = mode & ALLPERMS; 3073 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3074 vput(vp); 3075 } 3076 return error; 3077 } 3078 3079 int 3080 kern_chmod(struct nlookupdata *nd, int mode) 3081 { 3082 struct vnode *vp; 3083 int error; 3084 3085 if ((error = nlookup(nd)) != 0) 3086 return (error); 3087 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3088 return (error); 3089 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3090 error = setfmode(vp, mode); 3091 vrele(vp); 3092 return (error); 3093 } 3094 3095 /* 3096 * chmod_args(char *path, int mode) 3097 * 3098 * Change mode of a file given path name. 3099 */ 3100 int 3101 sys_chmod(struct chmod_args *uap) 3102 { 3103 struct nlookupdata nd; 3104 int error; 3105 3106 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3107 if (error == 0) 3108 error = kern_chmod(&nd, uap->mode); 3109 nlookup_done(&nd); 3110 return (error); 3111 } 3112 3113 /* 3114 * lchmod_args(char *path, int mode) 3115 * 3116 * Change mode of a file given path name (don't follow links.) 3117 */ 3118 int 3119 sys_lchmod(struct lchmod_args *uap) 3120 { 3121 struct nlookupdata nd; 3122 int error; 3123 3124 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3125 if (error == 0) 3126 error = kern_chmod(&nd, uap->mode); 3127 nlookup_done(&nd); 3128 return (error); 3129 } 3130 3131 /* 3132 * fchmod_args(int fd, int mode) 3133 * 3134 * Change mode of a file given a file descriptor. 3135 */ 3136 int 3137 sys_fchmod(struct fchmod_args *uap) 3138 { 3139 struct thread *td = curthread; 3140 struct proc *p = td->td_proc; 3141 struct file *fp; 3142 int error; 3143 3144 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3145 return (error); 3146 if (fp->f_nchandle.ncp) 3147 error = ncp_writechk(&fp->f_nchandle); 3148 if (error == 0) 3149 error = setfmode((struct vnode *)fp->f_data, uap->mode); 3150 fdrop(fp); 3151 return (error); 3152 } 3153 3154 /* 3155 * fchmodat_args(char *path, int mode) 3156 * 3157 * Change mode of a file pointed to by fd/path. 3158 */ 3159 int 3160 sys_fchmodat(struct fchmodat_args *uap) 3161 { 3162 struct nlookupdata nd; 3163 struct file *fp; 3164 int error; 3165 int flags; 3166 3167 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3168 return (EINVAL); 3169 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 3170 3171 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3172 UIO_USERSPACE, flags); 3173 if (error == 0) 3174 error = kern_chmod(&nd, uap->mode); 3175 nlookup_done_at(&nd, fp); 3176 return (error); 3177 } 3178 3179 static int 3180 setfown(struct mount *mp, struct vnode *vp, uid_t uid, gid_t gid) 3181 { 3182 struct thread *td = curthread; 3183 int error; 3184 struct vattr vattr; 3185 uid_t o_uid; 3186 gid_t o_gid; 3187 uint64_t size; 3188 3189 /* 3190 * note: vget is required for any operation that might mod the vnode 3191 * so VINACTIVE is properly cleared. 
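	 *
	 * XXX if VOP_GETATTR() fails below we return with the vnode still
	 * locked and referenced from vget(); that early-return path never
	 * reaches vput().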
3192 */ 3193 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3194 if ((error = VOP_GETATTR(vp, &vattr)) != 0) 3195 return error; 3196 o_uid = vattr.va_uid; 3197 o_gid = vattr.va_gid; 3198 size = vattr.va_size; 3199 3200 VATTR_NULL(&vattr); 3201 vattr.va_uid = uid; 3202 vattr.va_gid = gid; 3203 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3204 vput(vp); 3205 } 3206 3207 if (error == 0) { 3208 if (uid == -1) 3209 uid = o_uid; 3210 if (gid == -1) 3211 gid = o_gid; 3212 VFS_ACCOUNT(mp, o_uid, o_gid, -size); 3213 VFS_ACCOUNT(mp, uid, gid, size); 3214 } 3215 3216 return error; 3217 } 3218 3219 int 3220 kern_chown(struct nlookupdata *nd, int uid, int gid) 3221 { 3222 struct vnode *vp; 3223 int error; 3224 3225 if ((error = nlookup(nd)) != 0) 3226 return (error); 3227 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3228 return (error); 3229 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3230 error = setfown(nd->nl_nch.mount, vp, uid, gid); 3231 vrele(vp); 3232 return (error); 3233 } 3234 3235 /* 3236 * chown(char *path, int uid, int gid) 3237 * 3238 * Set ownership given a path name. 3239 */ 3240 int 3241 sys_chown(struct chown_args *uap) 3242 { 3243 struct nlookupdata nd; 3244 int error; 3245 3246 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3247 if (error == 0) 3248 error = kern_chown(&nd, uap->uid, uap->gid); 3249 nlookup_done(&nd); 3250 return (error); 3251 } 3252 3253 /* 3254 * lchown_args(char *path, int uid, int gid) 3255 * 3256 * Set ownership given a path name, do not cross symlinks. 3257 */ 3258 int 3259 sys_lchown(struct lchown_args *uap) 3260 { 3261 struct nlookupdata nd; 3262 int error; 3263 3264 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3265 if (error == 0) 3266 error = kern_chown(&nd, uap->uid, uap->gid); 3267 nlookup_done(&nd); 3268 return (error); 3269 } 3270 3271 /* 3272 * fchown_args(int fd, int uid, int gid) 3273 * 3274 * Set ownership given a file descriptor. 3275 */ 3276 int 3277 sys_fchown(struct fchown_args *uap) 3278 { 3279 struct thread *td = curthread; 3280 struct proc *p = td->td_proc; 3281 struct file *fp; 3282 int error; 3283 3284 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3285 return (error); 3286 if (fp->f_nchandle.ncp) 3287 error = ncp_writechk(&fp->f_nchandle); 3288 if (error == 0) 3289 error = setfown(p->p_fd->fd_ncdir.mount, 3290 (struct vnode *)fp->f_data, uap->uid, uap->gid); 3291 fdrop(fp); 3292 return (error); 3293 } 3294 3295 /* 3296 * fchownat(int fd, char *path, int uid, int gid, int flags) 3297 * 3298 * Set ownership of file pointed to by fd/path. 3299 */ 3300 int 3301 sys_fchownat(struct fchownat_args *uap) 3302 { 3303 struct nlookupdata nd; 3304 struct file *fp; 3305 int error; 3306 int flags; 3307 3308 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3309 return (EINVAL); 3310 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 
0 : NLC_FOLLOW; 3311 3312 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3313 UIO_USERSPACE, flags); 3314 if (error == 0) 3315 error = kern_chown(&nd, uap->uid, uap->gid); 3316 nlookup_done_at(&nd, fp); 3317 return (error); 3318 } 3319 3320 3321 static int 3322 getutimes(const struct timeval *tvp, struct timespec *tsp) 3323 { 3324 struct timeval tv[2]; 3325 3326 if (tvp == NULL) { 3327 microtime(&tv[0]); 3328 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); 3329 tsp[1] = tsp[0]; 3330 } else { 3331 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]); 3332 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]); 3333 } 3334 return 0; 3335 } 3336 3337 static int 3338 setutimes(struct vnode *vp, struct vattr *vattr, 3339 const struct timespec *ts, int nullflag) 3340 { 3341 struct thread *td = curthread; 3342 int error; 3343 3344 VATTR_NULL(vattr); 3345 vattr->va_atime = ts[0]; 3346 vattr->va_mtime = ts[1]; 3347 if (nullflag) 3348 vattr->va_vaflags |= VA_UTIMES_NULL; 3349 error = VOP_SETATTR(vp, vattr, td->td_ucred); 3350 3351 return error; 3352 } 3353 3354 int 3355 kern_utimes(struct nlookupdata *nd, struct timeval *tptr) 3356 { 3357 struct timespec ts[2]; 3358 struct vnode *vp; 3359 struct vattr vattr; 3360 int error; 3361 3362 if ((error = getutimes(tptr, ts)) != 0) 3363 return (error); 3364 3365 /* 3366 * NOTE: utimes() succeeds for the owner even if the file 3367 * is not user-writable. 3368 */ 3369 nd->nl_flags |= NLC_OWN | NLC_WRITE; 3370 3371 if ((error = nlookup(nd)) != 0) 3372 return (error); 3373 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3374 return (error); 3375 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3376 return (error); 3377 3378 /* 3379 * note: vget is required for any operation that might mod the vnode 3380 * so VINACTIVE is properly cleared. 3381 */ 3382 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3383 error = vget(vp, LK_EXCLUSIVE); 3384 if (error == 0) { 3385 error = setutimes(vp, &vattr, ts, (tptr == NULL)); 3386 vput(vp); 3387 } 3388 } 3389 vrele(vp); 3390 return (error); 3391 } 3392 3393 /* 3394 * utimes_args(char *path, struct timeval *tptr) 3395 * 3396 * Set the access and modification times of a file. 3397 */ 3398 int 3399 sys_utimes(struct utimes_args *uap) 3400 { 3401 struct timeval tv[2]; 3402 struct nlookupdata nd; 3403 int error; 3404 3405 if (uap->tptr) { 3406 error = copyin(uap->tptr, tv, sizeof(tv)); 3407 if (error) 3408 return (error); 3409 } 3410 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3411 if (error == 0) 3412 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3413 nlookup_done(&nd); 3414 return (error); 3415 } 3416 3417 /* 3418 * lutimes_args(char *path, struct timeval *tptr) 3419 * 3420 * Set the access and modification times of a file. 3421 */ 3422 int 3423 sys_lutimes(struct lutimes_args *uap) 3424 { 3425 struct timeval tv[2]; 3426 struct nlookupdata nd; 3427 int error; 3428 3429 if (uap->tptr) { 3430 error = copyin(uap->tptr, tv, sizeof(tv)); 3431 if (error) 3432 return (error); 3433 } 3434 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3435 if (error == 0) 3436 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3437 nlookup_done(&nd); 3438 return (error); 3439 } 3440 3441 /* 3442 * Set utimes on a file descriptor. The creds used to open the 3443 * file are used to determine whether the operation is allowed 3444 * or not. 
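 *
 * Illustrative note: passing a NULL tptr (e.g. futimes(fd, NULL)) sets both
 * the access and modification times to "now"; getutimes() fills both
 * timespecs from microtime() in that case and setutimes() marks the change
 * with VA_UTIMES_NULL.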
3445 */ 3446 int 3447 kern_futimes(int fd, struct timeval *tptr) 3448 { 3449 struct thread *td = curthread; 3450 struct proc *p = td->td_proc; 3451 struct timespec ts[2]; 3452 struct file *fp; 3453 struct vnode *vp; 3454 struct vattr vattr; 3455 int error; 3456 3457 error = getutimes(tptr, ts); 3458 if (error) 3459 return (error); 3460 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3461 return (error); 3462 if (fp->f_nchandle.ncp) 3463 error = ncp_writechk(&fp->f_nchandle); 3464 if (error == 0) { 3465 vp = fp->f_data; 3466 error = vget(vp, LK_EXCLUSIVE); 3467 if (error == 0) { 3468 error = VOP_GETATTR(vp, &vattr); 3469 if (error == 0) { 3470 error = naccess_va(&vattr, NLC_OWN | NLC_WRITE, 3471 fp->f_cred); 3472 } 3473 if (error == 0) { 3474 error = setutimes(vp, &vattr, ts, 3475 (tptr == NULL)); 3476 } 3477 vput(vp); 3478 } 3479 } 3480 fdrop(fp); 3481 return (error); 3482 } 3483 3484 /* 3485 * futimes_args(int fd, struct timeval *tptr) 3486 * 3487 * Set the access and modification times of a file. 3488 */ 3489 int 3490 sys_futimes(struct futimes_args *uap) 3491 { 3492 struct timeval tv[2]; 3493 int error; 3494 3495 if (uap->tptr) { 3496 error = copyin(uap->tptr, tv, sizeof(tv)); 3497 if (error) 3498 return (error); 3499 } 3500 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL); 3501 3502 return (error); 3503 } 3504 3505 int 3506 kern_truncate(struct nlookupdata *nd, off_t length) 3507 { 3508 struct vnode *vp; 3509 struct vattr vattr; 3510 int error; 3511 uid_t uid = 0; 3512 gid_t gid = 0; 3513 uint64_t old_size = 0; 3514 3515 if (length < 0) 3516 return(EINVAL); 3517 nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE; 3518 if ((error = nlookup(nd)) != 0) 3519 return (error); 3520 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3521 return (error); 3522 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3523 return (error); 3524 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 3525 vrele(vp); 3526 return (error); 3527 } 3528 if (vp->v_type == VDIR) { 3529 error = EISDIR; 3530 goto done; 3531 } 3532 if (vfs_quota_enabled) { 3533 error = VOP_GETATTR(vp, &vattr); 3534 KASSERT(error == 0, ("kern_truncate(): VOP_GETATTR didn't return 0")); 3535 uid = vattr.va_uid; 3536 gid = vattr.va_gid; 3537 old_size = vattr.va_size; 3538 } 3539 3540 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3541 VATTR_NULL(&vattr); 3542 vattr.va_size = length; 3543 error = VOP_SETATTR(vp, &vattr, nd->nl_cred); 3544 VFS_ACCOUNT(nd->nl_nch.mount, uid, gid, length - old_size); 3545 } 3546 done: 3547 vput(vp); 3548 return (error); 3549 } 3550 3551 /* 3552 * truncate(char *path, int pad, off_t length) 3553 * 3554 * Truncate a file given its path name. 
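 *
 * Illustrative sketch (userland, hypothetical path): truncate("/tmp/data", 0)
 * empties the file.  kern_truncate() rejects negative lengths with EINVAL
 * and directories with EISDIR before issuing the VOP_SETATTR() of va_size.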
3555 */ 3556 int 3557 sys_truncate(struct truncate_args *uap) 3558 { 3559 struct nlookupdata nd; 3560 int error; 3561 3562 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3563 if (error == 0) 3564 error = kern_truncate(&nd, uap->length); 3565 nlookup_done(&nd); 3566 return error; 3567 } 3568 3569 int 3570 kern_ftruncate(int fd, off_t length) 3571 { 3572 struct thread *td = curthread; 3573 struct proc *p = td->td_proc; 3574 struct vattr vattr; 3575 struct vnode *vp; 3576 struct file *fp; 3577 int error; 3578 uid_t uid = 0; 3579 gid_t gid = 0; 3580 uint64_t old_size = 0; 3581 struct mount *mp; 3582 3583 if (length < 0) 3584 return(EINVAL); 3585 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3586 return (error); 3587 if (fp->f_nchandle.ncp) { 3588 error = ncp_writechk(&fp->f_nchandle); 3589 if (error) 3590 goto done; 3591 } 3592 if ((fp->f_flag & FWRITE) == 0) { 3593 error = EINVAL; 3594 goto done; 3595 } 3596 if (fp->f_flag & FAPPENDONLY) { /* inode was set s/uapnd */ 3597 error = EINVAL; 3598 goto done; 3599 } 3600 vp = (struct vnode *)fp->f_data; 3601 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3602 if (vp->v_type == VDIR) { 3603 error = EISDIR; 3604 goto done; 3605 } 3606 3607 if (vfs_quota_enabled) { 3608 error = VOP_GETATTR(vp, &vattr); 3609 KASSERT(error == 0, ("kern_ftruncate(): VOP_GETATTR didn't return 0")); 3610 uid = vattr.va_uid; 3611 gid = vattr.va_gid; 3612 old_size = vattr.va_size; 3613 } 3614 3615 if ((error = vn_writechk(vp, NULL)) == 0) { 3616 VATTR_NULL(&vattr); 3617 vattr.va_size = length; 3618 error = VOP_SETATTR(vp, &vattr, fp->f_cred); 3619 mp = vq_vptomp(vp); 3620 VFS_ACCOUNT(mp, uid, gid, length - old_size); 3621 } 3622 vn_unlock(vp); 3623 done: 3624 fdrop(fp); 3625 return (error); 3626 } 3627 3628 /* 3629 * ftruncate_args(int fd, int pad, off_t length) 3630 * 3631 * Truncate a file given a file descriptor. 3632 */ 3633 int 3634 sys_ftruncate(struct ftruncate_args *uap) 3635 { 3636 int error; 3637 3638 error = kern_ftruncate(uap->fd, uap->length); 3639 3640 return (error); 3641 } 3642 3643 /* 3644 * fsync(int fd) 3645 * 3646 * Sync an open file. 3647 */ 3648 int 3649 sys_fsync(struct fsync_args *uap) 3650 { 3651 struct thread *td = curthread; 3652 struct proc *p = td->td_proc; 3653 struct vnode *vp; 3654 struct file *fp; 3655 vm_object_t obj; 3656 int error; 3657 3658 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3659 return (error); 3660 vp = (struct vnode *)fp->f_data; 3661 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3662 if ((obj = vp->v_object) != NULL) { 3663 if (vp->v_mount == NULL || 3664 (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC) == 0) { 3665 vm_object_page_clean(obj, 0, 0, 0); 3666 } 3667 } 3668 error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL); 3669 if (error == 0 && vp->v_mount) 3670 error = buf_fsync(vp); 3671 vn_unlock(vp); 3672 fdrop(fp); 3673 3674 return (error); 3675 } 3676 3677 int 3678 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond) 3679 { 3680 struct nchandle fnchd; 3681 struct nchandle tnchd; 3682 struct namecache *ncp; 3683 struct vnode *fdvp; 3684 struct vnode *tdvp; 3685 struct mount *mp; 3686 int error; 3687 3688 bwillinode(1); 3689 fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC; 3690 if ((error = nlookup(fromnd)) != 0) 3691 return (error); 3692 if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL) 3693 return (ENOENT); 3694 fnchd.mount = fromnd->nl_nch.mount; 3695 cache_hold(&fnchd); 3696 3697 /* 3698 * unlock the source nch so we can lookup the target nch without 3699 * deadlocking. 
The target may or may not exist so we do not check
3700 	 * for a target vp like kern_mkdir() and other creation functions do.
3701 	 *
3702 	 * The source and target directories are ref'd and rechecked after
3703 	 * everything is relocked to determine if the source or target file
3704 	 * has been renamed.
3705 	 */
3706 	KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
3707 	fromnd->nl_flags &= ~NLC_NCPISLOCKED;
3708 	cache_unlock(&fromnd->nl_nch);
3709 
3710 	tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP;
3711 	if ((error = nlookup(tond)) != 0) {
3712 		cache_drop(&fnchd);
3713 		return (error);
3714 	}
3715 	if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) {
3716 		cache_drop(&fnchd);
3717 		return (ENOENT);
3718 	}
3719 	tnchd.mount = tond->nl_nch.mount;
3720 	cache_hold(&tnchd);
3721 
3722 	/*
3723 	 * If the source and target are the same there is nothing to do
3724 	 */
3725 	if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) {
3726 		cache_drop(&fnchd);
3727 		cache_drop(&tnchd);
3728 		return (0);
3729 	}
3730 
3731 	/*
3732 	 * Mount points cannot be renamed or overwritten
3733 	 */
3734 	if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) &
3735 	    NCF_ISMOUNTPT
3736 	) {
3737 		cache_drop(&fnchd);
3738 		cache_drop(&tnchd);
3739 		return (EINVAL);
3740 	}
3741 
3742 	/*
3743 	 * Relock the source ncp. cache_relock() will deal with any
3744 	 * deadlocks against the already-locked tond and will also
3745 	 * make sure both are resolved.
3746 	 *
3747 	 * NOTE AFTER RELOCKING: The source or target ncp may have become
3748 	 * invalid while they were unlocked, nc_vp and nc_mount could
3749 	 * be NULL.
3750 	 */
3751 	cache_relock(&fromnd->nl_nch, fromnd->nl_cred,
3752 		     &tond->nl_nch, tond->nl_cred);
3753 	fromnd->nl_flags |= NLC_NCPISLOCKED;
3754 
3755 	/*
3756 	 * If either fromnd or tond is marked destroyed, a ripout occurred
3757 	 * out from under us and we must retry.
3758 	 */
3759 	if ((fromnd->nl_nch.ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)) ||
3760 	    fromnd->nl_nch.ncp->nc_vp == NULL ||
3761 	    (tond->nl_nch.ncp->nc_flag & NCF_DESTROYED)) {
3762 		kprintf("kern_rename: retry due to ripout on: "
3763 			"\"%s\" -> \"%s\"\n",
3764 			fromnd->nl_nch.ncp->nc_name,
3765 			tond->nl_nch.ncp->nc_name);
3766 		cache_drop(&fnchd);
3767 		cache_drop(&tnchd);
3768 		return (EAGAIN);
3769 	}
3770 
3771 	/*
3772 	 * make sure the parent directories' linkages are the same
3773 	 */
3774 	if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent ||
3775 	    tnchd.ncp != tond->nl_nch.ncp->nc_parent) {
3776 		cache_drop(&fnchd);
3777 		cache_drop(&tnchd);
3778 		return (ENOENT);
3779 	}
3780 
3781 	/*
3782 	 * Both the source and target must be within the same filesystem and
3783 	 * in the same filesystem as their parent directories within the
3784 	 * namecache topology.
3785 	 *
3786 	 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
3787 	 */
3788 	mp = fnchd.mount;
3789 	if (mp != tnchd.mount || mp != fromnd->nl_nch.mount ||
3790 	    mp != tond->nl_nch.mount) {
3791 		cache_drop(&fnchd);
3792 		cache_drop(&tnchd);
3793 		return (EXDEV);
3794 	}
3795 
3796 	/*
3797 	 * Make sure the mount point is writable
3798 	 */
3799 	if ((error = ncp_writechk(&tond->nl_nch)) != 0) {
3800 		cache_drop(&fnchd);
3801 		cache_drop(&tnchd);
3802 		return (error);
3803 	}
3804 
3805 	/*
3806 	 * If the target exists and either the source or target is a directory,
3807 	 * then both must be directories.
3808 	 *
3809 	 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might
3810 	 * have become NULL.
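	 *
	 * Illustrative outcomes of the checks below: renaming a directory
	 * over an existing non-directory fails with ENOTDIR, and renaming a
	 * non-directory over an existing directory fails with EISDIR.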
3811 	 */
3812 	if (tond->nl_nch.ncp->nc_vp) {
3813 		if (fromnd->nl_nch.ncp->nc_vp == NULL) {
3814 			error = ENOENT;
3815 		} else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) {
3816 			if (tond->nl_nch.ncp->nc_vp->v_type != VDIR)
3817 				error = ENOTDIR;
3818 		} else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) {
3819 			error = EISDIR;
3820 		}
3821 	}
3822 
3823 	/*
3824 	 * You cannot rename a source into itself or a subdirectory of itself.
3825 	 * We check this by traversing the target directory upwards looking
3826 	 * for a match against the source.
3827 	 *
3828 	 * XXX MPSAFE
3829 	 */
3830 	if (error == 0) {
3831 		for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) {
3832 			if (fromnd->nl_nch.ncp == ncp) {
3833 				error = EINVAL;
3834 				break;
3835 			}
3836 		}
3837 	}
3838 
3839 	cache_drop(&fnchd);
3840 	cache_drop(&tnchd);
3841 
3842 	/*
3843 	 * Even though the namespaces are different, they may still represent
3844 	 * hardlinks to the same file. The filesystem might have a hard time
3845 	 * with this so we issue a NREMOVE of the source instead of a NRENAME
3846 	 * when we detect the situation.
3847 	 */
3848 	if (error == 0) {
3849 		fdvp = fromnd->nl_dvp;
3850 		tdvp = tond->nl_dvp;
3851 		if (fdvp == NULL || tdvp == NULL) {
3852 			error = EPERM;
3853 		} else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) {
3854 			error = VOP_NREMOVE(&fromnd->nl_nch, fdvp,
3855 					    fromnd->nl_cred);
3856 		} else {
3857 			error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch,
3858 					    fdvp, tdvp, tond->nl_cred);
3859 		}
3860 	}
3861 	return (error);
3862 }
3863 
3864 /*
3865  * rename_args(char *from, char *to)
3866  *
3867  * Rename files. Source and destination must either both be directories,
3868  * or both not be directories. If target is a directory, it must be empty.
3869  */
3870 int
3871 sys_rename(struct rename_args *uap)
3872 {
3873 	struct nlookupdata fromnd, tond;
3874 	int error;
3875 
3876 	do {
3877 		error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
3878 		if (error == 0) {
3879 			error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
3880 			if (error == 0)
3881 				error = kern_rename(&fromnd, &tond);
3882 			nlookup_done(&tond);
3883 		}
3884 		nlookup_done(&fromnd);
3885 	} while (error == EAGAIN);
3886 	return (error);
3887 }
3888 
3889 /*
3890  * renameat_args(int oldfd, char *old, int newfd, char *new)
3891  *
3892  * Rename files using paths relative to the directories associated with
3893  * oldfd and newfd. Source and destination must either both be directories,
3894  * or both not be directories. If target is a directory, it must be empty.
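 *
 * Illustrative sketch (userland, hypothetical names):
 *
 *	renameat(dirfd1, "old.txt", dirfd2, "new.txt");
 *
 * Each path resolves relative to its directory fd; kern_rename() still
 * returns EXDEV if source and target end up on different mounts, and the
 * wrapper below retries on EAGAIN when a namecache ripout is detected.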
3895 */ 3896 int 3897 sys_renameat(struct renameat_args *uap) 3898 { 3899 struct nlookupdata oldnd, newnd; 3900 struct file *oldfp, *newfp; 3901 int error; 3902 3903 do { 3904 error = nlookup_init_at(&oldnd, &oldfp, 3905 uap->oldfd, uap->old, 3906 UIO_USERSPACE, 0); 3907 if (error == 0) { 3908 error = nlookup_init_at(&newnd, &newfp, 3909 uap->newfd, uap->new, 3910 UIO_USERSPACE, 0); 3911 if (error == 0) 3912 error = kern_rename(&oldnd, &newnd); 3913 nlookup_done_at(&newnd, newfp); 3914 } 3915 nlookup_done_at(&oldnd, oldfp); 3916 } while (error == EAGAIN); 3917 return (error); 3918 } 3919 3920 int 3921 kern_mkdir(struct nlookupdata *nd, int mode) 3922 { 3923 struct thread *td = curthread; 3924 struct proc *p = td->td_proc; 3925 struct vnode *vp; 3926 struct vattr vattr; 3927 int error; 3928 3929 bwillinode(1); 3930 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP; 3931 if ((error = nlookup(nd)) != 0) 3932 return (error); 3933 3934 if (nd->nl_nch.ncp->nc_vp) 3935 return (EEXIST); 3936 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3937 return (error); 3938 VATTR_NULL(&vattr); 3939 vattr.va_type = VDIR; 3940 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; 3941 3942 vp = NULL; 3943 error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, td->td_ucred, &vattr); 3944 if (error == 0) 3945 vput(vp); 3946 return (error); 3947 } 3948 3949 /* 3950 * mkdir_args(char *path, int mode) 3951 * 3952 * Make a directory file. 3953 */ 3954 int 3955 sys_mkdir(struct mkdir_args *uap) 3956 { 3957 struct nlookupdata nd; 3958 int error; 3959 3960 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3961 if (error == 0) 3962 error = kern_mkdir(&nd, uap->mode); 3963 nlookup_done(&nd); 3964 return (error); 3965 } 3966 3967 /* 3968 * mkdirat_args(int fd, char *path, mode_t mode) 3969 * 3970 * Make a directory file. The path is relative to the directory associated 3971 * with fd. 3972 */ 3973 int 3974 sys_mkdirat(struct mkdirat_args *uap) 3975 { 3976 struct nlookupdata nd; 3977 struct file *fp; 3978 int error; 3979 3980 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 3981 if (error == 0) 3982 error = kern_mkdir(&nd, uap->mode); 3983 nlookup_done_at(&nd, fp); 3984 return (error); 3985 } 3986 3987 int 3988 kern_rmdir(struct nlookupdata *nd) 3989 { 3990 int error; 3991 3992 bwillinode(1); 3993 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 3994 if ((error = nlookup(nd)) != 0) 3995 return (error); 3996 3997 /* 3998 * Do not allow directories representing mount points to be 3999 * deleted, even if empty. Check write perms on mount point 4000 * in case the vnode is aliased (aka nullfs). 4001 */ 4002 if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT)) 4003 return (EINVAL); 4004 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 4005 return (error); 4006 error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 4007 return (error); 4008 } 4009 4010 /* 4011 * rmdir_args(char *path) 4012 * 4013 * Remove a directory file. 
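 *
 * Illustrative note: a directory which is a mount point cannot be removed
 * even when empty; kern_rmdir() returns EINVAL for NCF_ISMOUNTPT entries
 * before issuing VOP_NRMDIR().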
4014 */ 4015 int 4016 sys_rmdir(struct rmdir_args *uap) 4017 { 4018 struct nlookupdata nd; 4019 int error; 4020 4021 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 4022 if (error == 0) 4023 error = kern_rmdir(&nd); 4024 nlookup_done(&nd); 4025 return (error); 4026 } 4027 4028 int 4029 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res, 4030 enum uio_seg direction) 4031 { 4032 struct thread *td = curthread; 4033 struct proc *p = td->td_proc; 4034 struct vnode *vp; 4035 struct file *fp; 4036 struct uio auio; 4037 struct iovec aiov; 4038 off_t loff; 4039 int error, eofflag; 4040 4041 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 4042 return (error); 4043 if ((fp->f_flag & FREAD) == 0) { 4044 error = EBADF; 4045 goto done; 4046 } 4047 vp = (struct vnode *)fp->f_data; 4048 unionread: 4049 if (vp->v_type != VDIR) { 4050 error = EINVAL; 4051 goto done; 4052 } 4053 aiov.iov_base = buf; 4054 aiov.iov_len = count; 4055 auio.uio_iov = &aiov; 4056 auio.uio_iovcnt = 1; 4057 auio.uio_rw = UIO_READ; 4058 auio.uio_segflg = direction; 4059 auio.uio_td = td; 4060 auio.uio_resid = count; 4061 loff = auio.uio_offset = fp->f_offset; 4062 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL); 4063 fp->f_offset = auio.uio_offset; 4064 if (error) 4065 goto done; 4066 if (count == auio.uio_resid) { 4067 if (union_dircheckp) { 4068 error = union_dircheckp(td, &vp, fp); 4069 if (error == -1) 4070 goto unionread; 4071 if (error) 4072 goto done; 4073 } 4074 #if 0 4075 if ((vp->v_flag & VROOT) && 4076 (vp->v_mount->mnt_flag & MNT_UNION)) { 4077 struct vnode *tvp = vp; 4078 vp = vp->v_mount->mnt_vnodecovered; 4079 vref(vp); 4080 fp->f_data = vp; 4081 fp->f_offset = 0; 4082 vrele(tvp); 4083 goto unionread; 4084 } 4085 #endif 4086 } 4087 4088 /* 4089 * WARNING! *basep may not be wide enough to accomodate the 4090 * seek offset. XXX should we hack this to return the upper 32 bits 4091 * for offsets greater then 4G? 4092 */ 4093 if (basep) { 4094 *basep = (long)loff; 4095 } 4096 *res = count - auio.uio_resid; 4097 done: 4098 fdrop(fp); 4099 return (error); 4100 } 4101 4102 /* 4103 * getdirentries_args(int fd, char *buf, u_int conut, long *basep) 4104 * 4105 * Read a block of directory entries in a file system independent format. 4106 */ 4107 int 4108 sys_getdirentries(struct getdirentries_args *uap) 4109 { 4110 long base; 4111 int error; 4112 4113 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base, 4114 &uap->sysmsg_result, UIO_USERSPACE); 4115 4116 if (error == 0 && uap->basep) 4117 error = copyout(&base, uap->basep, sizeof(*uap->basep)); 4118 return (error); 4119 } 4120 4121 /* 4122 * getdents_args(int fd, char *buf, size_t count) 4123 */ 4124 int 4125 sys_getdents(struct getdents_args *uap) 4126 { 4127 int error; 4128 4129 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL, 4130 &uap->sysmsg_result, UIO_USERSPACE); 4131 4132 return (error); 4133 } 4134 4135 /* 4136 * Set the mode mask for creation of filesystem nodes. 4137 * 4138 * umask(int newmask) 4139 */ 4140 int 4141 sys_umask(struct umask_args *uap) 4142 { 4143 struct thread *td = curthread; 4144 struct proc *p = td->td_proc; 4145 struct filedesc *fdp; 4146 4147 fdp = p->p_fd; 4148 uap->sysmsg_result = fdp->fd_cmask; 4149 fdp->fd_cmask = uap->newmask & ALLPERMS; 4150 return (0); 4151 } 4152 4153 /* 4154 * revoke(char *path) 4155 * 4156 * Void all references to file by ripping underlying filesystem 4157 * away from vnode. 
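 *
 * Illustrative sketch (hypothetical device path): revoke("/dev/ttyp0") is
 * typically issued by login-type programs to detach any stale opens of a
 * terminal. A caller who does not own the vnode needs PRIV_VFS_REVOKE
 * (see the priv_check_cred() below).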
4158 */ 4159 int 4160 sys_revoke(struct revoke_args *uap) 4161 { 4162 struct nlookupdata nd; 4163 struct vattr vattr; 4164 struct vnode *vp; 4165 struct ucred *cred; 4166 int error; 4167 4168 vp = NULL; 4169 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4170 if (error == 0) 4171 error = nlookup(&nd); 4172 if (error == 0) 4173 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 4174 cred = crhold(nd.nl_cred); 4175 nlookup_done(&nd); 4176 if (error == 0) { 4177 if (error == 0) 4178 error = VOP_GETATTR(vp, &vattr); 4179 if (error == 0 && cred->cr_uid != vattr.va_uid) 4180 error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0); 4181 if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) { 4182 if (vcount(vp) > 0) 4183 error = vrevoke(vp, cred); 4184 } else if (error == 0) { 4185 error = vrevoke(vp, cred); 4186 } 4187 vrele(vp); 4188 } 4189 if (cred) 4190 crfree(cred); 4191 return (error); 4192 } 4193 4194 /* 4195 * getfh_args(char *fname, fhandle_t *fhp) 4196 * 4197 * Get (NFS) file handle 4198 * 4199 * NOTE: We use the fsid of the covering mount, even if it is a nullfs 4200 * mount. This allows nullfs mounts to be explicitly exported. 4201 * 4202 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe. 4203 * 4204 * nullfs mounts of subdirectories are not safe. That is, it will 4205 * work, but you do not really have protection against access to 4206 * the related parent directories. 4207 */ 4208 int 4209 sys_getfh(struct getfh_args *uap) 4210 { 4211 struct thread *td = curthread; 4212 struct nlookupdata nd; 4213 fhandle_t fh; 4214 struct vnode *vp; 4215 struct mount *mp; 4216 int error; 4217 4218 /* 4219 * Must be super user 4220 */ 4221 if ((error = priv_check(td, PRIV_ROOT)) != 0) 4222 return (error); 4223 4224 vp = NULL; 4225 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW); 4226 if (error == 0) 4227 error = nlookup(&nd); 4228 if (error == 0) 4229 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4230 mp = nd.nl_nch.mount; 4231 nlookup_done(&nd); 4232 if (error == 0) { 4233 bzero(&fh, sizeof(fh)); 4234 fh.fh_fsid = mp->mnt_stat.f_fsid; 4235 error = VFS_VPTOFH(vp, &fh.fh_fid); 4236 vput(vp); 4237 if (error == 0) 4238 error = copyout(&fh, uap->fhp, sizeof(fh)); 4239 } 4240 return (error); 4241 } 4242 4243 /* 4244 * fhopen_args(const struct fhandle *u_fhp, int flags) 4245 * 4246 * syscall for the rpc.lockd to use to translate a NFS file handle into 4247 * an open descriptor. 4248 * 4249 * warning: do not remove the priv_check() call or this becomes one giant 4250 * security hole. 4251 */ 4252 int 4253 sys_fhopen(struct fhopen_args *uap) 4254 { 4255 struct thread *td = curthread; 4256 struct filedesc *fdp = td->td_proc->p_fd; 4257 struct mount *mp; 4258 struct vnode *vp; 4259 struct fhandle fhp; 4260 struct vattr vat; 4261 struct vattr *vap = &vat; 4262 struct flock lf; 4263 int fmode, mode, error = 0, type; 4264 struct file *nfp; 4265 struct file *fp; 4266 int indx; 4267 4268 /* 4269 * Must be super user 4270 */ 4271 error = priv_check(td, PRIV_ROOT); 4272 if (error) 4273 return (error); 4274 4275 fmode = FFLAGS(uap->flags); 4276 4277 /* 4278 * Why not allow a non-read/write open for our lockd? 
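	 *
	 * Illustrative use (hypothetical handle): rpc.lockd obtains an
	 * fhandle_t for a file it has never opened by path and calls
	 * fhopen(&fh, O_RDWR) to get a descriptor; O_CREAT and opens with
	 * neither FREAD nor FWRITE are rejected just below.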
4279 */ 4280 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) 4281 return (EINVAL); 4282 error = copyin(uap->u_fhp, &fhp, sizeof(fhp)); 4283 if (error) 4284 return(error); 4285 4286 /* 4287 * Find the mount point 4288 */ 4289 mp = vfs_getvfs(&fhp.fh_fsid); 4290 if (mp == NULL) { 4291 error = ESTALE; 4292 goto done; 4293 } 4294 /* now give me my vnode, it gets returned to me locked */ 4295 error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp); 4296 if (error) 4297 goto done; 4298 /* 4299 * from now on we have to make sure not 4300 * to forget about the vnode 4301 * any error that causes an abort must vput(vp) 4302 * just set error = err and 'goto bad;'. 4303 */ 4304 4305 /* 4306 * from vn_open 4307 */ 4308 if (vp->v_type == VLNK) { 4309 error = EMLINK; 4310 goto bad; 4311 } 4312 if (vp->v_type == VSOCK) { 4313 error = EOPNOTSUPP; 4314 goto bad; 4315 } 4316 mode = 0; 4317 if (fmode & (FWRITE | O_TRUNC)) { 4318 if (vp->v_type == VDIR) { 4319 error = EISDIR; 4320 goto bad; 4321 } 4322 error = vn_writechk(vp, NULL); 4323 if (error) 4324 goto bad; 4325 mode |= VWRITE; 4326 } 4327 if (fmode & FREAD) 4328 mode |= VREAD; 4329 if (mode) { 4330 error = VOP_ACCESS(vp, mode, td->td_ucred); 4331 if (error) 4332 goto bad; 4333 } 4334 if (fmode & O_TRUNC) { 4335 vn_unlock(vp); /* XXX */ 4336 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 4337 VATTR_NULL(vap); 4338 vap->va_size = 0; 4339 error = VOP_SETATTR(vp, vap, td->td_ucred); 4340 if (error) 4341 goto bad; 4342 } 4343 4344 /* 4345 * VOP_OPEN needs the file pointer so it can potentially override 4346 * it. 4347 * 4348 * WARNING! no f_nchandle will be associated when fhopen()ing a 4349 * directory. XXX 4350 */ 4351 if ((error = falloc(td->td_lwp, &nfp, &indx)) != 0) 4352 goto bad; 4353 fp = nfp; 4354 4355 error = VOP_OPEN(vp, fmode, td->td_ucred, fp); 4356 if (error) { 4357 /* 4358 * setting f_ops this way prevents VOP_CLOSE from being 4359 * called or fdrop() releasing the vp from v_data. Since 4360 * the VOP_OPEN failed we don't want to VOP_CLOSE. 4361 */ 4362 fp->f_ops = &badfileops; 4363 fp->f_data = NULL; 4364 goto bad_drop; 4365 } 4366 4367 /* 4368 * The fp is given its own reference, we still have our ref and lock. 4369 * 4370 * Assert that all regular files must be created with a VM object. 4371 */ 4372 if (vp->v_type == VREG && vp->v_object == NULL) { 4373 kprintf("fhopen: regular file did not have VM object: %p\n", vp); 4374 goto bad_drop; 4375 } 4376 4377 /* 4378 * The open was successful. Handle any locking requirements. 4379 */ 4380 if (fmode & (O_EXLOCK | O_SHLOCK)) { 4381 lf.l_whence = SEEK_SET; 4382 lf.l_start = 0; 4383 lf.l_len = 0; 4384 if (fmode & O_EXLOCK) 4385 lf.l_type = F_WRLCK; 4386 else 4387 lf.l_type = F_RDLCK; 4388 if (fmode & FNONBLOCK) 4389 type = 0; 4390 else 4391 type = F_WAIT; 4392 vn_unlock(vp); 4393 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 4394 /* 4395 * release our private reference. 4396 */ 4397 fsetfd(fdp, NULL, indx); 4398 fdrop(fp); 4399 vrele(vp); 4400 goto done; 4401 } 4402 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4403 fp->f_flag |= FHASLOCK; 4404 } 4405 4406 /* 4407 * Clean up. Associate the file pointer with the previously 4408 * reserved descriptor and return it. 
4409 */ 4410 vput(vp); 4411 fsetfd(fdp, fp, indx); 4412 fdrop(fp); 4413 uap->sysmsg_result = indx; 4414 if (uap->flags & O_CLOEXEC) 4415 error = fsetfdflags(fdp, indx, UF_EXCLOSE); 4416 return (error); 4417 4418 bad_drop: 4419 fsetfd(fdp, NULL, indx); 4420 fdrop(fp); 4421 bad: 4422 vput(vp); 4423 done: 4424 return (error); 4425 } 4426 4427 /* 4428 * fhstat_args(struct fhandle *u_fhp, struct stat *sb) 4429 */ 4430 int 4431 sys_fhstat(struct fhstat_args *uap) 4432 { 4433 struct thread *td = curthread; 4434 struct stat sb; 4435 fhandle_t fh; 4436 struct mount *mp; 4437 struct vnode *vp; 4438 int error; 4439 4440 /* 4441 * Must be super user 4442 */ 4443 error = priv_check(td, PRIV_ROOT); 4444 if (error) 4445 return (error); 4446 4447 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); 4448 if (error) 4449 return (error); 4450 4451 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) 4452 error = ESTALE; 4453 if (error == 0) { 4454 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) == 0) { 4455 error = vn_stat(vp, &sb, td->td_ucred); 4456 vput(vp); 4457 } 4458 } 4459 if (error == 0) 4460 error = copyout(&sb, uap->sb, sizeof(sb)); 4461 return (error); 4462 } 4463 4464 /* 4465 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf) 4466 */ 4467 int 4468 sys_fhstatfs(struct fhstatfs_args *uap) 4469 { 4470 struct thread *td = curthread; 4471 struct proc *p = td->td_proc; 4472 struct statfs *sp; 4473 struct mount *mp; 4474 struct vnode *vp; 4475 struct statfs sb; 4476 char *fullpath, *freepath; 4477 fhandle_t fh; 4478 int error; 4479 4480 /* 4481 * Must be super user 4482 */ 4483 if ((error = priv_check(td, PRIV_ROOT))) 4484 return (error); 4485 4486 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4487 return (error); 4488 4489 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4490 error = ESTALE; 4491 goto done; 4492 } 4493 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4494 error = ESTALE; 4495 goto done; 4496 } 4497 4498 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) != 0) 4499 goto done; 4500 mp = vp->v_mount; 4501 sp = &mp->mnt_stat; 4502 vput(vp); 4503 if ((error = VFS_STATFS(mp, sp, td->td_ucred)) != 0) 4504 goto done; 4505 4506 error = mount_path(p, mp, &fullpath, &freepath); 4507 if (error) 4508 goto done; 4509 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 4510 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 4511 kfree(freepath, M_TEMP); 4512 4513 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 4514 if (priv_check(td, PRIV_ROOT)) { 4515 bcopy(sp, &sb, sizeof(sb)); 4516 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0; 4517 sp = &sb; 4518 } 4519 error = copyout(sp, uap->buf, sizeof(*sp)); 4520 done: 4521 return (error); 4522 } 4523 4524 /* 4525 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf) 4526 */ 4527 int 4528 sys_fhstatvfs(struct fhstatvfs_args *uap) 4529 { 4530 struct thread *td = curthread; 4531 struct proc *p = td->td_proc; 4532 struct statvfs *sp; 4533 struct mount *mp; 4534 struct vnode *vp; 4535 fhandle_t fh; 4536 int error; 4537 4538 /* 4539 * Must be super user 4540 */ 4541 if ((error = priv_check(td, PRIV_ROOT))) 4542 return (error); 4543 4544 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4545 return (error); 4546 4547 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4548 error = ESTALE; 4549 goto done; 4550 } 4551 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4552 error = ESTALE; 4553 goto done; 4554 } 4555 4556 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp))) 4557 goto done; 4558 mp = vp->v_mount; 4559 sp = &mp->mnt_vstat; 4560 
/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), and attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed so that the syscall copies it into kernel
 * space, once the outstanding issues with the various consumers of the
 * attribute code have been resolved.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;
	struct vnode *vp;
	char attrname[EXTATTR_MAXNAMELEN];
	int error;
	size_t size;

	attrname[0] = 0;
	vp = NULL;
	error = 0;

	if (error == 0 && uap->filename) {
		error = nlookup_init(&nd, uap->filename, UIO_USERSPACE,
				     NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
		nlookup_done(&nd);
	}

	if (error == 0 && uap->attrname) {
		error = copyinstr(uap->attrname, attrname, EXTATTR_MAXNAMELEN,
				  &size);
	}

	if (error == 0) {
		error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = ncp_writechk(&nd.nl_nch);
		if (error == 0) {
			error = VFS_EXTATTRCTL(nd.nl_nch.mount, uap->cmd, vp,
					       uap->attrnamespace,
					       uap->attrname, nd.nl_cred);
		}
		nlookup_done(&nd);
	}

	return (error);
}

/*
 * Syscall to set a named extended attribute on a file or directory.
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	error = VOP_SETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);

	vput(vp);
	nlookup_done(&nd);
	return (error);
}
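/*
 * Illustrative userland sketch (not kernel code, and not part of the
 * original file): writing and reading back a user-namespace attribute
 * exercises sys_extattr_set_file() above and sys_extattr_get_file()
 * below.  This assumes the extattr_set_file(2)/extattr_get_file(2)
 * library wrappers and the EXTATTR_NAMESPACE_USER constant from
 * <sys/extattr.h>.
 *
 *	#include <sys/types.h>
 *	#include <sys/extattr.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		const char *val = "blue";
 *		char buf[64];
 *		ssize_t n;
 *
 *		if (extattr_set_file("file.txt", EXTATTR_NAMESPACE_USER,
 *		    "color", val, strlen(val)) < 0)
 *			err(1, "extattr_set_file");
 *		n = extattr_get_file("file.txt", EXTATTR_NAMESPACE_USER,
 *		    "color", buf, sizeof(buf));
 *		if (n < 0)
 *			err(1, "extattr_get_file");
 *		return (0);
 *	}
 */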
/*
 * Syscall to get a named extended attribute on a file or directory.
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct uio auio;
	struct iovec aiov;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	error = VOP_GETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);
	uap->sysmsg_result = uap->nbytes - auio.uio_resid;

	vput(vp);
	nlookup_done(&nd);
	return (error);
}

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0) {
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
		if (error == 0) {
			error = VOP_SETEXTATTR(vp, uap->attrnamespace,
					       attrname, NULL, nd.nl_cred);
			vput(vp);
		}
	}
	nlookup_done(&nd);
	return (error);
}

/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;
	while (nch.ncp) {
		if (nch.mount == p->p_fd->fd_nrdir.mount &&
		    nch.ncp == p->p_fd->fd_nrdir.ncp) {
			return (1);
		}
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		} else {
			nch.ncp = nch.ncp->nc_parent;
		}
	}

	/*
	 * If the mount point is not visible to the process, but the
	 * process root is in a subdirectory of the mount, return
	 * TRUE anyway.
	 */
	if (p->p_fd->fd_nrdir.mount == mp)
		return (1);

	return (0);
}
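/*
 * Illustrative userland sketch (not kernel code, and not part of the
 * original file): removing the attribute again goes through
 * sys_extattr_delete_file() above.  This assumes the
 * extattr_delete_file(2) wrapper and EXTATTR_NAMESPACE_USER from
 * <sys/extattr.h>.
 *
 *	#include <sys/types.h>
 *	#include <sys/extattr.h>
 *	#include <err.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (extattr_delete_file("file.txt", EXTATTR_NAMESPACE_USER,
 *		    "color") < 0)
 *			err(1, "extattr_delete_file");
 *		return (0);
 *	}
 */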