1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $ 40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.135 2008/11/11 00:55:49 pavalos Exp $ 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/buf.h> 46 #include <sys/conf.h> 47 #include <sys/sysent.h> 48 #include <sys/malloc.h> 49 #include <sys/mount.h> 50 #include <sys/mountctl.h> 51 #include <sys/sysproto.h> 52 #include <sys/filedesc.h> 53 #include <sys/kernel.h> 54 #include <sys/fcntl.h> 55 #include <sys/file.h> 56 #include <sys/linker.h> 57 #include <sys/stat.h> 58 #include <sys/unistd.h> 59 #include <sys/vnode.h> 60 #include <sys/proc.h> 61 #include <sys/priv.h> 62 #include <sys/jail.h> 63 #include <sys/namei.h> 64 #include <sys/nlookup.h> 65 #include <sys/dirent.h> 66 #include <sys/extattr.h> 67 #include <sys/spinlock.h> 68 #include <sys/kern_syscall.h> 69 #include <sys/objcache.h> 70 #include <sys/sysctl.h> 71 72 #include <sys/buf2.h> 73 #include <sys/file2.h> 74 #include <sys/spinlock2.h> 75 #include <sys/mplock2.h> 76 77 #include <vm/vm.h> 78 #include <vm/vm_object.h> 79 #include <vm/vm_page.h> 80 81 #include <machine/limits.h> 82 #include <machine/stdarg.h> 83 84 #include <vfs/union/union.h> 85 86 static void mount_warning(struct mount *mp, const char *ctl, ...); 87 static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb); 88 static int checkvp_chdir (struct vnode *vn, struct thread *td); 89 static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch); 90 static int chroot_refuse_vdir_fds (struct filedesc *fdp); 91 static int chroot_visible_mnt(struct mount *mp, struct proc *p); 92 static int getutimes (const struct timeval *, struct timespec *); 93 static int setfown (struct vnode *, uid_t, gid_t); 94 static int setfmode (struct vnode *, int); 95 static int setfflags (struct vnode *, int); 96 static int setutimes (struct vnode *, struct vattr *, 97 const struct timespec *, int); 98 static int usermount = 0; /* if 1, non-root can mount fs. */ 99 100 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *); 101 102 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, ""); 103 104 /* 105 * Virtual File System System Calls 106 */ 107 108 /* 109 * Mount a file system. 110 * 111 * mount_args(char *type, char *path, int flags, caddr_t data) 112 * 113 * MPALMOSTSAFE 114 */ 115 int 116 sys_mount(struct mount_args *uap) 117 { 118 struct thread *td = curthread; 119 struct vnode *vp; 120 struct nchandle nch; 121 struct mount *mp, *nullmp; 122 struct vfsconf *vfsp; 123 int error, flag = 0, flag2 = 0; 124 int hasmount; 125 struct vattr va; 126 struct nlookupdata nd; 127 char fstypename[MFSNAMELEN]; 128 struct ucred *cred; 129 130 get_mplock(); 131 cred = td->td_ucred; 132 if (jailed(cred)) { 133 error = EPERM; 134 goto done; 135 } 136 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 137 goto done; 138 139 /* 140 * Do not allow NFS export by non-root users. 141 */ 142 if (uap->flags & MNT_EXPORTED) { 143 error = priv_check(td, PRIV_ROOT); 144 if (error) 145 goto done; 146 } 147 /* 148 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users 149 */ 150 if (priv_check(td, PRIV_ROOT)) 151 uap->flags |= MNT_NOSUID | MNT_NODEV; 152 153 /* 154 * Lookup the requested path and extract the nch and vnode. 155 */ 156 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 157 if (error == 0) { 158 if ((error = nlookup(&nd)) == 0) { 159 if (nd.nl_nch.ncp->nc_vp == NULL) 160 error = ENOENT; 161 } 162 } 163 if (error) { 164 nlookup_done(&nd); 165 goto done; 166 } 167 168 /* 169 * If the target filesystem is resolved via a nullfs mount, then 170 * nd.nl_nch.mount will be pointing to the nullfs mount structure 171 * instead of the target file system. We need it in case we are 172 * doing an update. 173 */ 174 nullmp = nd.nl_nch.mount; 175 176 /* 177 * Extract the locked+refd ncp and cleanup the nd structure 178 */ 179 nch = nd.nl_nch; 180 cache_zero(&nd.nl_nch); 181 nlookup_done(&nd); 182 183 if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && cache_findmount(&nch)) 184 hasmount = 1; 185 else 186 hasmount = 0; 187 188 189 /* 190 * now we have the locked ref'd nch and unreferenced vnode. 191 */ 192 vp = nch.ncp->nc_vp; 193 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) { 194 cache_put(&nch); 195 goto done; 196 } 197 cache_unlock(&nch); 198 199 /* 200 * Extract the file system type. We need to know this early, to take 201 * appropriate actions if we are dealing with a nullfs. 202 */ 203 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) { 204 cache_drop(&nch); 205 vput(vp); 206 goto done; 207 } 208 209 /* 210 * Now we have an unlocked ref'd nch and a locked ref'd vp 211 */ 212 if (uap->flags & MNT_UPDATE) { 213 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 214 cache_drop(&nch); 215 vput(vp); 216 error = EINVAL; 217 goto done; 218 } 219 220 if (strncmp(fstypename, "null", 5) == 0) { 221 KKASSERT(nullmp); 222 mp = nullmp; 223 } else { 224 mp = vp->v_mount; 225 } 226 227 flag = mp->mnt_flag; 228 flag2 = mp->mnt_kern_flag; 229 /* 230 * We only allow the filesystem to be reloaded if it 231 * is currently mounted read-only. 232 */ 233 if ((uap->flags & MNT_RELOAD) && 234 ((mp->mnt_flag & MNT_RDONLY) == 0)) { 235 cache_drop(&nch); 236 vput(vp); 237 error = EOPNOTSUPP; /* Needs translation */ 238 goto done; 239 } 240 /* 241 * Only root, or the user that did the original mount is 242 * permitted to update it. 243 */ 244 if (mp->mnt_stat.f_owner != cred->cr_uid && 245 (error = priv_check(td, PRIV_ROOT))) { 246 cache_drop(&nch); 247 vput(vp); 248 goto done; 249 } 250 if (vfs_busy(mp, LK_NOWAIT)) { 251 cache_drop(&nch); 252 vput(vp); 253 error = EBUSY; 254 goto done; 255 } 256 if ((vp->v_flag & VMOUNT) != 0 || hasmount) { 257 cache_drop(&nch); 258 vfs_unbusy(mp); 259 vput(vp); 260 error = EBUSY; 261 goto done; 262 } 263 vsetflags(vp, VMOUNT); 264 mp->mnt_flag |= 265 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE); 266 vn_unlock(vp); 267 goto update; 268 } 269 /* 270 * If the user is not root, ensure that they own the directory 271 * onto which we are attempting to mount. 272 */ 273 if ((error = VOP_GETATTR(vp, &va)) || 274 (va.va_uid != cred->cr_uid && (error = priv_check(td, PRIV_ROOT)))) { 275 cache_drop(&nch); 276 vput(vp); 277 goto done; 278 } 279 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) { 280 cache_drop(&nch); 281 vput(vp); 282 goto done; 283 } 284 if (vp->v_type != VDIR) { 285 cache_drop(&nch); 286 vput(vp); 287 error = ENOTDIR; 288 goto done; 289 } 290 if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) { 291 cache_drop(&nch); 292 vput(vp); 293 error = EPERM; 294 goto done; 295 } 296 vfsp = vfsconf_find_by_name(fstypename); 297 if (vfsp == NULL) { 298 linker_file_t lf; 299 300 /* Only load modules for root (very important!) */ 301 if ((error = priv_check(td, PRIV_ROOT)) != 0) { 302 cache_drop(&nch); 303 vput(vp); 304 goto done; 305 } 306 error = linker_load_file(fstypename, &lf); 307 if (error || lf == NULL) { 308 cache_drop(&nch); 309 vput(vp); 310 if (lf == NULL) 311 error = ENODEV; 312 goto done; 313 } 314 lf->userrefs++; 315 /* lookup again, see if the VFS was loaded */ 316 vfsp = vfsconf_find_by_name(fstypename); 317 if (vfsp == NULL) { 318 lf->userrefs--; 319 linker_file_unload(lf); 320 cache_drop(&nch); 321 vput(vp); 322 error = ENODEV; 323 goto done; 324 } 325 } 326 if ((vp->v_flag & VMOUNT) != 0 || hasmount) { 327 cache_drop(&nch); 328 vput(vp); 329 error = EBUSY; 330 goto done; 331 } 332 vsetflags(vp, VMOUNT); 333 334 /* 335 * Allocate and initialize the filesystem. 336 */ 337 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK); 338 mount_init(mp); 339 vfs_busy(mp, LK_NOWAIT); 340 mp->mnt_op = vfsp->vfc_vfsops; 341 mp->mnt_vfc = vfsp; 342 vfsp->vfc_refcount++; 343 mp->mnt_stat.f_type = vfsp->vfc_typenum; 344 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 345 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 346 mp->mnt_stat.f_owner = cred->cr_uid; 347 vn_unlock(vp); 348 update: 349 /* 350 * Set the mount level flags. 351 */ 352 if (uap->flags & MNT_RDONLY) 353 mp->mnt_flag |= MNT_RDONLY; 354 else if (mp->mnt_flag & MNT_RDONLY) 355 mp->mnt_kern_flag |= MNTK_WANTRDWR; 356 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | 357 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME | 358 MNT_NOSYMFOLLOW | MNT_IGNORE | 359 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 360 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | 361 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE | 362 MNT_NOSYMFOLLOW | MNT_IGNORE | 363 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 364 /* 365 * Mount the filesystem. 366 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they 367 * get. 368 */ 369 error = VFS_MOUNT(mp, uap->path, uap->data, cred); 370 if (mp->mnt_flag & MNT_UPDATE) { 371 if (mp->mnt_kern_flag & MNTK_WANTRDWR) 372 mp->mnt_flag &= ~MNT_RDONLY; 373 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); 374 mp->mnt_kern_flag &=~ MNTK_WANTRDWR; 375 if (error) { 376 mp->mnt_flag = flag; 377 mp->mnt_kern_flag = flag2; 378 } 379 vfs_unbusy(mp); 380 vclrflags(vp, VMOUNT); 381 vrele(vp); 382 cache_drop(&nch); 383 goto done; 384 } 385 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 386 /* 387 * Put the new filesystem on the mount list after root. The mount 388 * point gets its own mnt_ncmountpt (unless the VFS already set one 389 * up) which represents the root of the mount. The lookup code 390 * detects the mount point going forward and checks the root of 391 * the mount going backwards. 392 * 393 * It is not necessary to invalidate or purge the vnode underneath 394 * because elements under the mount will be given their own glue 395 * namecache record. 396 */ 397 if (!error) { 398 if (mp->mnt_ncmountpt.ncp == NULL) { 399 /* 400 * allocate, then unlock, but leave the ref intact 401 */ 402 cache_allocroot(&mp->mnt_ncmountpt, mp, NULL); 403 cache_unlock(&mp->mnt_ncmountpt); 404 } 405 mp->mnt_ncmounton = nch; /* inherits ref */ 406 nch.ncp->nc_flag |= NCF_ISMOUNTPT; 407 408 /* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */ 409 vclrflags(vp, VMOUNT); 410 mountlist_insert(mp, MNTINS_LAST); 411 vn_unlock(vp); 412 checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt); 413 error = vfs_allocate_syncvnode(mp); 414 vfs_unbusy(mp); 415 error = VFS_START(mp, 0); 416 vrele(vp); 417 } else { 418 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 419 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 420 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 421 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 422 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 423 vclrflags(vp, VMOUNT); 424 mp->mnt_vfc->vfc_refcount--; 425 vfs_unbusy(mp); 426 kfree(mp, M_MOUNT); 427 cache_drop(&nch); 428 vput(vp); 429 } 430 done: 431 rel_mplock(); 432 return (error); 433 } 434 435 /* 436 * Scan all active processes to see if any of them have a current 437 * or root directory onto which the new filesystem has just been 438 * mounted. If so, replace them with the new mount point. 439 * 440 * The passed ncp is ref'd and locked (from the mount code) and 441 * must be associated with the vnode representing the root of the 442 * mount point. 443 */ 444 struct checkdirs_info { 445 struct nchandle old_nch; 446 struct nchandle new_nch; 447 struct vnode *old_vp; 448 struct vnode *new_vp; 449 }; 450 451 static int checkdirs_callback(struct proc *p, void *data); 452 453 static void 454 checkdirs(struct nchandle *old_nch, struct nchandle *new_nch) 455 { 456 struct checkdirs_info info; 457 struct vnode *olddp; 458 struct vnode *newdp; 459 struct mount *mp; 460 461 /* 462 * If the old mount point's vnode has a usecount of 1, it is not 463 * being held as a descriptor anywhere. 464 */ 465 olddp = old_nch->ncp->nc_vp; 466 if (olddp == NULL || olddp->v_sysref.refcnt == 1) 467 return; 468 469 /* 470 * Force the root vnode of the new mount point to be resolved 471 * so we can update any matching processes. 472 */ 473 mp = new_nch->mount; 474 if (VFS_ROOT(mp, &newdp)) 475 panic("mount: lost mount"); 476 cache_setunresolved(new_nch); 477 cache_setvp(new_nch, newdp); 478 479 /* 480 * Special handling of the root node 481 */ 482 if (rootvnode == olddp) { 483 vref(newdp); 484 vfs_cache_setroot(newdp, cache_hold(new_nch)); 485 } 486 487 /* 488 * Pass newdp separately so the callback does not have to access 489 * it via new_nch->ncp->nc_vp. 490 */ 491 info.old_nch = *old_nch; 492 info.new_nch = *new_nch; 493 info.new_vp = newdp; 494 allproc_scan(checkdirs_callback, &info); 495 vput(newdp); 496 } 497 498 /* 499 * NOTE: callback is not MP safe because the scanned process's filedesc 500 * structure can be ripped out from under us, amoung other things. 501 */ 502 static int 503 checkdirs_callback(struct proc *p, void *data) 504 { 505 struct checkdirs_info *info = data; 506 struct filedesc *fdp; 507 struct nchandle ncdrop1; 508 struct nchandle ncdrop2; 509 struct vnode *vprele1; 510 struct vnode *vprele2; 511 512 if ((fdp = p->p_fd) != NULL) { 513 cache_zero(&ncdrop1); 514 cache_zero(&ncdrop2); 515 vprele1 = NULL; 516 vprele2 = NULL; 517 518 /* 519 * MPUNSAFE - XXX fdp can be pulled out from under a 520 * foreign process. 521 * 522 * A shared filedesc is ok, we don't have to copy it 523 * because we are making this change globally. 524 */ 525 spin_lock_wr(&fdp->fd_spin); 526 if (fdp->fd_ncdir.mount == info->old_nch.mount && 527 fdp->fd_ncdir.ncp == info->old_nch.ncp) { 528 vprele1 = fdp->fd_cdir; 529 vref(info->new_vp); 530 fdp->fd_cdir = info->new_vp; 531 ncdrop1 = fdp->fd_ncdir; 532 cache_copy(&info->new_nch, &fdp->fd_ncdir); 533 } 534 if (fdp->fd_nrdir.mount == info->old_nch.mount && 535 fdp->fd_nrdir.ncp == info->old_nch.ncp) { 536 vprele2 = fdp->fd_rdir; 537 vref(info->new_vp); 538 fdp->fd_rdir = info->new_vp; 539 ncdrop2 = fdp->fd_nrdir; 540 cache_copy(&info->new_nch, &fdp->fd_nrdir); 541 } 542 spin_unlock_wr(&fdp->fd_spin); 543 if (ncdrop1.ncp) 544 cache_drop(&ncdrop1); 545 if (ncdrop2.ncp) 546 cache_drop(&ncdrop2); 547 if (vprele1) 548 vrele(vprele1); 549 if (vprele2) 550 vrele(vprele2); 551 } 552 return(0); 553 } 554 555 /* 556 * Unmount a file system. 557 * 558 * Note: unmount takes a path to the vnode mounted on as argument, 559 * not special file (as before). 560 * 561 * umount_args(char *path, int flags) 562 * 563 * MPALMOSTSAFE 564 */ 565 int 566 sys_unmount(struct unmount_args *uap) 567 { 568 struct thread *td = curthread; 569 struct proc *p __debugvar = td->td_proc; 570 struct mount *mp = NULL; 571 struct nlookupdata nd; 572 int error; 573 574 KKASSERT(p); 575 get_mplock(); 576 if (td->td_ucred->cr_prison != NULL) { 577 error = EPERM; 578 goto done; 579 } 580 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 581 goto done; 582 583 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 584 if (error == 0) 585 error = nlookup(&nd); 586 if (error) 587 goto out; 588 589 mp = nd.nl_nch.mount; 590 591 /* 592 * Only root, or the user that did the original mount is 593 * permitted to unmount this filesystem. 594 */ 595 if ((mp->mnt_stat.f_owner != td->td_ucred->cr_uid) && 596 (error = priv_check(td, PRIV_ROOT))) 597 goto out; 598 599 /* 600 * Don't allow unmounting the root file system. 601 */ 602 if (mp->mnt_flag & MNT_ROOTFS) { 603 error = EINVAL; 604 goto out; 605 } 606 607 /* 608 * Must be the root of the filesystem 609 */ 610 if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) { 611 error = EINVAL; 612 goto out; 613 } 614 615 out: 616 nlookup_done(&nd); 617 if (error == 0) 618 error = dounmount(mp, uap->flags); 619 done: 620 rel_mplock(); 621 return (error); 622 } 623 624 /* 625 * Do the actual file system unmount. 626 */ 627 static int 628 dounmount_interlock(struct mount *mp) 629 { 630 if (mp->mnt_kern_flag & MNTK_UNMOUNT) 631 return (EBUSY); 632 mp->mnt_kern_flag |= MNTK_UNMOUNT; 633 return(0); 634 } 635 636 int 637 dounmount(struct mount *mp, int flags) 638 { 639 struct namecache *ncp; 640 struct nchandle nch; 641 struct vnode *vp; 642 int error; 643 int async_flag; 644 int lflags; 645 int freeok = 1; 646 647 /* 648 * Exclusive access for unmounting purposes 649 */ 650 if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0) 651 return (error); 652 653 /* 654 * Allow filesystems to detect that a forced unmount is in progress. 655 */ 656 if (flags & MNT_FORCE) 657 mp->mnt_kern_flag |= MNTK_UNMOUNTF; 658 lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT); 659 error = lockmgr(&mp->mnt_lock, lflags); 660 if (error) { 661 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); 662 if (mp->mnt_kern_flag & MNTK_MWAIT) 663 wakeup(mp); 664 return (error); 665 } 666 667 if (mp->mnt_flag & MNT_EXPUBLIC) 668 vfs_setpublicfs(NULL, NULL, NULL); 669 670 vfs_msync(mp, MNT_WAIT); 671 async_flag = mp->mnt_flag & MNT_ASYNC; 672 mp->mnt_flag &=~ MNT_ASYNC; 673 674 /* 675 * If this filesystem isn't aliasing other filesystems, 676 * try to invalidate any remaining namecache entries and 677 * check the count afterwords. 678 */ 679 if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) { 680 cache_lock(&mp->mnt_ncmountpt); 681 cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN); 682 cache_unlock(&mp->mnt_ncmountpt); 683 684 if ((ncp = mp->mnt_ncmountpt.ncp) != NULL && 685 (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) { 686 687 if ((flags & MNT_FORCE) == 0) { 688 error = EBUSY; 689 mount_warning(mp, "Cannot unmount: " 690 "%d namecache " 691 "references still " 692 "present", 693 ncp->nc_refs - 1); 694 } else { 695 mount_warning(mp, "Forced unmount: " 696 "%d namecache " 697 "references still " 698 "present", 699 ncp->nc_refs - 1); 700 freeok = 0; 701 } 702 } 703 } 704 705 /* 706 * nchandle records ref the mount structure. Expect a count of 1 707 * (our mount->mnt_ncmountpt). 708 */ 709 if (mp->mnt_refs != 1) { 710 if ((flags & MNT_FORCE) == 0) { 711 mount_warning(mp, "Cannot unmount: " 712 "%d process references still " 713 "present", mp->mnt_refs); 714 error = EBUSY; 715 } else { 716 mount_warning(mp, "Forced unmount: " 717 "%d process references still " 718 "present", mp->mnt_refs); 719 freeok = 0; 720 } 721 } 722 723 /* 724 * Decomission our special mnt_syncer vnode. This also stops 725 * the vnlru code. If we are unable to unmount we recommission 726 * the vnode. 727 */ 728 if (error == 0) { 729 if ((vp = mp->mnt_syncer) != NULL) { 730 mp->mnt_syncer = NULL; 731 vrele(vp); 732 } 733 if (((mp->mnt_flag & MNT_RDONLY) || 734 (error = VFS_SYNC(mp, MNT_WAIT)) == 0) || 735 (flags & MNT_FORCE)) { 736 error = VFS_UNMOUNT(mp, flags); 737 } 738 } 739 if (error) { 740 if (mp->mnt_syncer == NULL) 741 vfs_allocate_syncvnode(mp); 742 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); 743 mp->mnt_flag |= async_flag; 744 lockmgr(&mp->mnt_lock, LK_RELEASE); 745 if (mp->mnt_kern_flag & MNTK_MWAIT) 746 wakeup(mp); 747 return (error); 748 } 749 /* 750 * Clean up any journals still associated with the mount after 751 * filesystem activity has ceased. 752 */ 753 journal_remove_all_journals(mp, 754 ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0)); 755 756 mountlist_remove(mp); 757 758 /* 759 * Remove any installed vnode ops here so the individual VFSs don't 760 * have to. 761 */ 762 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 763 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 764 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 765 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 766 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 767 768 if (mp->mnt_ncmountpt.ncp != NULL) { 769 nch = mp->mnt_ncmountpt; 770 cache_zero(&mp->mnt_ncmountpt); 771 cache_clrmountpt(&nch); 772 cache_drop(&nch); 773 } 774 if (mp->mnt_ncmounton.ncp != NULL) { 775 nch = mp->mnt_ncmounton; 776 cache_zero(&mp->mnt_ncmounton); 777 cache_clrmountpt(&nch); 778 cache_drop(&nch); 779 } 780 781 mp->mnt_vfc->vfc_refcount--; 782 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) 783 panic("unmount: dangling vnode"); 784 lockmgr(&mp->mnt_lock, LK_RELEASE); 785 if (mp->mnt_kern_flag & MNTK_MWAIT) 786 wakeup(mp); 787 if (freeok) 788 kfree(mp, M_MOUNT); 789 return (0); 790 } 791 792 static 793 void 794 mount_warning(struct mount *mp, const char *ctl, ...) 795 { 796 char *ptr; 797 char *buf; 798 __va_list va; 799 800 __va_start(va, ctl); 801 if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf, 0) == 0) { 802 kprintf("unmount(%s): ", ptr); 803 kvprintf(ctl, va); 804 kprintf("\n"); 805 kfree(buf, M_TEMP); 806 } else { 807 kprintf("unmount(%p", mp); 808 if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name) 809 kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name); 810 kprintf("): "); 811 kvprintf(ctl, va); 812 kprintf("\n"); 813 } 814 __va_end(va); 815 } 816 817 /* 818 * Shim cache_fullpath() to handle the case where a process is chrooted into 819 * a subdirectory of a mount. In this case if the root mount matches the 820 * process root directory's mount we have to specify the process's root 821 * directory instead of the mount point, because the mount point might 822 * be above the root directory. 823 */ 824 static 825 int 826 mount_path(struct proc *p, struct mount *mp, char **rb, char **fb) 827 { 828 struct nchandle *nch; 829 830 if (p && p->p_fd->fd_nrdir.mount == mp) 831 nch = &p->p_fd->fd_nrdir; 832 else 833 nch = &mp->mnt_ncmountpt; 834 return(cache_fullpath(p, nch, rb, fb, 0)); 835 } 836 837 /* 838 * Sync each mounted filesystem. 839 */ 840 841 #ifdef DEBUG 842 static int syncprt = 0; 843 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, ""); 844 #endif /* DEBUG */ 845 846 static int sync_callback(struct mount *mp, void *data); 847 848 /* 849 * MPALMOSTSAFE 850 */ 851 int 852 sys_sync(struct sync_args *uap) 853 { 854 get_mplock(); 855 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD); 856 #ifdef DEBUG 857 /* 858 * print out buffer pool stat information on each sync() call. 859 */ 860 if (syncprt) 861 vfs_bufstats(); 862 #endif /* DEBUG */ 863 rel_mplock(); 864 return (0); 865 } 866 867 static 868 int 869 sync_callback(struct mount *mp, void *data __unused) 870 { 871 int asyncflag; 872 873 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 874 asyncflag = mp->mnt_flag & MNT_ASYNC; 875 mp->mnt_flag &= ~MNT_ASYNC; 876 vfs_msync(mp, MNT_NOWAIT); 877 VFS_SYNC(mp, MNT_NOWAIT); 878 mp->mnt_flag |= asyncflag; 879 } 880 return(0); 881 } 882 883 /* XXX PRISON: could be per prison flag */ 884 static int prison_quotas; 885 #if 0 886 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, ""); 887 #endif 888 889 /* 890 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg) 891 * 892 * Change filesystem quotas. 893 * 894 * MPALMOSTSAFE 895 */ 896 int 897 sys_quotactl(struct quotactl_args *uap) 898 { 899 struct nlookupdata nd; 900 struct thread *td; 901 struct proc *p; 902 struct mount *mp; 903 int error; 904 905 get_mplock(); 906 td = curthread; 907 p = td->td_proc; 908 if (td->td_ucred->cr_prison && !prison_quotas) { 909 error = EPERM; 910 goto done; 911 } 912 913 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 914 if (error == 0) 915 error = nlookup(&nd); 916 if (error == 0) { 917 mp = nd.nl_nch.mount; 918 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid, 919 uap->arg, nd.nl_cred); 920 } 921 nlookup_done(&nd); 922 done: 923 rel_mplock(); 924 return (error); 925 } 926 927 /* 928 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen, 929 * void *buf, int buflen) 930 * 931 * This function operates on a mount point and executes the specified 932 * operation using the specified control data, and possibly returns data. 933 * 934 * The actual number of bytes stored in the result buffer is returned, 0 935 * if none, otherwise an error is returned. 936 * 937 * MPALMOSTSAFE 938 */ 939 int 940 sys_mountctl(struct mountctl_args *uap) 941 { 942 struct thread *td = curthread; 943 struct proc *p = td->td_proc; 944 struct file *fp; 945 void *ctl = NULL; 946 void *buf = NULL; 947 char *path = NULL; 948 int error; 949 950 /* 951 * Sanity and permissions checks. We must be root. 952 */ 953 KKASSERT(p); 954 if (td->td_ucred->cr_prison != NULL) 955 return (EPERM); 956 if ((uap->op != MOUNTCTL_MOUNTFLAGS) && 957 (error = priv_check(td, PRIV_ROOT)) != 0) 958 return (error); 959 960 /* 961 * Argument length checks 962 */ 963 if (uap->ctllen < 0 || uap->ctllen > 1024) 964 return (EINVAL); 965 if (uap->buflen < 0 || uap->buflen > 16 * 1024) 966 return (EINVAL); 967 if (uap->path == NULL) 968 return (EINVAL); 969 970 /* 971 * Allocate the necessary buffers and copyin data 972 */ 973 path = objcache_get(namei_oc, M_WAITOK); 974 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 975 if (error) 976 goto done; 977 978 if (uap->ctllen) { 979 ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO); 980 error = copyin(uap->ctl, ctl, uap->ctllen); 981 if (error) 982 goto done; 983 } 984 if (uap->buflen) 985 buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO); 986 987 /* 988 * Validate the descriptor 989 */ 990 if (uap->fd >= 0) { 991 fp = holdfp(p->p_fd, uap->fd, -1); 992 if (fp == NULL) { 993 error = EBADF; 994 goto done; 995 } 996 } else { 997 fp = NULL; 998 } 999 1000 /* 1001 * Execute the internal kernel function and clean up. 1002 */ 1003 get_mplock(); 1004 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result); 1005 rel_mplock(); 1006 if (fp) 1007 fdrop(fp); 1008 if (error == 0 && uap->sysmsg_result > 0) 1009 error = copyout(buf, uap->buf, uap->sysmsg_result); 1010 done: 1011 if (path) 1012 objcache_put(namei_oc, path); 1013 if (ctl) 1014 kfree(ctl, M_TEMP); 1015 if (buf) 1016 kfree(buf, M_TEMP); 1017 return (error); 1018 } 1019 1020 /* 1021 * Execute a mount control operation by resolving the path to a mount point 1022 * and calling vop_mountctl(). 1023 * 1024 * Use the mount point from the nch instead of the vnode so nullfs mounts 1025 * can properly spike the VOP. 1026 */ 1027 int 1028 kern_mountctl(const char *path, int op, struct file *fp, 1029 const void *ctl, int ctllen, 1030 void *buf, int buflen, int *res) 1031 { 1032 struct vnode *vp; 1033 struct mount *mp; 1034 struct nlookupdata nd; 1035 int error; 1036 1037 *res = 0; 1038 vp = NULL; 1039 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW); 1040 if (error == 0) 1041 error = nlookup(&nd); 1042 if (error == 0) 1043 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 1044 mp = nd.nl_nch.mount; 1045 nlookup_done(&nd); 1046 if (error) 1047 return (error); 1048 vn_unlock(vp); 1049 1050 /* 1051 * Must be the root of the filesystem 1052 */ 1053 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 1054 vrele(vp); 1055 return (EINVAL); 1056 } 1057 error = vop_mountctl(mp->mnt_vn_use_ops, vp, op, fp, ctl, ctllen, 1058 buf, buflen, res); 1059 vrele(vp); 1060 return (error); 1061 } 1062 1063 int 1064 kern_statfs(struct nlookupdata *nd, struct statfs *buf) 1065 { 1066 struct thread *td = curthread; 1067 struct proc *p = td->td_proc; 1068 struct mount *mp; 1069 struct statfs *sp; 1070 char *fullpath, *freepath; 1071 int error; 1072 1073 if ((error = nlookup(nd)) != 0) 1074 return (error); 1075 mp = nd->nl_nch.mount; 1076 sp = &mp->mnt_stat; 1077 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0) 1078 return (error); 1079 1080 error = mount_path(p, mp, &fullpath, &freepath); 1081 if (error) 1082 return(error); 1083 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1084 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1085 kfree(freepath, M_TEMP); 1086 1087 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1088 bcopy(sp, buf, sizeof(*buf)); 1089 /* Only root should have access to the fsid's. */ 1090 if (priv_check(td, PRIV_ROOT)) 1091 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1092 return (0); 1093 } 1094 1095 /* 1096 * statfs_args(char *path, struct statfs *buf) 1097 * 1098 * Get filesystem statistics. 1099 * 1100 * MPALMOSTSAFE 1101 */ 1102 int 1103 sys_statfs(struct statfs_args *uap) 1104 { 1105 struct nlookupdata nd; 1106 struct statfs buf; 1107 int error; 1108 1109 get_mplock(); 1110 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1111 if (error == 0) 1112 error = kern_statfs(&nd, &buf); 1113 nlookup_done(&nd); 1114 if (error == 0) 1115 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1116 rel_mplock(); 1117 return (error); 1118 } 1119 1120 /* 1121 * MPALMOSTSAFE 1122 */ 1123 int 1124 kern_fstatfs(int fd, struct statfs *buf) 1125 { 1126 struct thread *td = curthread; 1127 struct proc *p = td->td_proc; 1128 struct file *fp; 1129 struct mount *mp; 1130 struct statfs *sp; 1131 char *fullpath, *freepath; 1132 int error; 1133 1134 KKASSERT(p); 1135 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 1136 return (error); 1137 get_mplock(); 1138 mp = ((struct vnode *)fp->f_data)->v_mount; 1139 if (mp == NULL) { 1140 error = EBADF; 1141 goto done; 1142 } 1143 if (fp->f_cred == NULL) { 1144 error = EINVAL; 1145 goto done; 1146 } 1147 sp = &mp->mnt_stat; 1148 if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0) 1149 goto done; 1150 1151 if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0) 1152 goto done; 1153 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1154 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1155 kfree(freepath, M_TEMP); 1156 1157 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1158 bcopy(sp, buf, sizeof(*buf)); 1159 1160 /* Only root should have access to the fsid's. */ 1161 if (priv_check(td, PRIV_ROOT)) 1162 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1163 error = 0; 1164 done: 1165 rel_mplock(); 1166 fdrop(fp); 1167 return (error); 1168 } 1169 1170 /* 1171 * fstatfs_args(int fd, struct statfs *buf) 1172 * 1173 * Get filesystem statistics. 1174 * 1175 * MPSAFE 1176 */ 1177 int 1178 sys_fstatfs(struct fstatfs_args *uap) 1179 { 1180 struct statfs buf; 1181 int error; 1182 1183 error = kern_fstatfs(uap->fd, &buf); 1184 1185 if (error == 0) 1186 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1187 return (error); 1188 } 1189 1190 int 1191 kern_statvfs(struct nlookupdata *nd, struct statvfs *buf) 1192 { 1193 struct mount *mp; 1194 struct statvfs *sp; 1195 int error; 1196 1197 if ((error = nlookup(nd)) != 0) 1198 return (error); 1199 mp = nd->nl_nch.mount; 1200 sp = &mp->mnt_vstat; 1201 if ((error = VFS_STATVFS(mp, sp, nd->nl_cred)) != 0) 1202 return (error); 1203 1204 sp->f_flag = 0; 1205 if (mp->mnt_flag & MNT_RDONLY) 1206 sp->f_flag |= ST_RDONLY; 1207 if (mp->mnt_flag & MNT_NOSUID) 1208 sp->f_flag |= ST_NOSUID; 1209 bcopy(sp, buf, sizeof(*buf)); 1210 return (0); 1211 } 1212 1213 /* 1214 * statfs_args(char *path, struct statfs *buf) 1215 * 1216 * Get filesystem statistics. 1217 * 1218 * MPALMOSTSAFE 1219 */ 1220 int 1221 sys_statvfs(struct statvfs_args *uap) 1222 { 1223 struct nlookupdata nd; 1224 struct statvfs buf; 1225 int error; 1226 1227 get_mplock(); 1228 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1229 if (error == 0) 1230 error = kern_statvfs(&nd, &buf); 1231 nlookup_done(&nd); 1232 if (error == 0) 1233 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1234 rel_mplock(); 1235 return (error); 1236 } 1237 1238 int 1239 kern_fstatvfs(int fd, struct statvfs *buf) 1240 { 1241 struct thread *td = curthread; 1242 struct proc *p = td->td_proc; 1243 struct file *fp; 1244 struct mount *mp; 1245 struct statvfs *sp; 1246 int error; 1247 1248 KKASSERT(p); 1249 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 1250 return (error); 1251 mp = ((struct vnode *)fp->f_data)->v_mount; 1252 if (mp == NULL) { 1253 error = EBADF; 1254 goto done; 1255 } 1256 if (fp->f_cred == NULL) { 1257 error = EINVAL; 1258 goto done; 1259 } 1260 sp = &mp->mnt_vstat; 1261 if ((error = VFS_STATVFS(mp, sp, fp->f_cred)) != 0) 1262 goto done; 1263 1264 sp->f_flag = 0; 1265 if (mp->mnt_flag & MNT_RDONLY) 1266 sp->f_flag |= ST_RDONLY; 1267 if (mp->mnt_flag & MNT_NOSUID) 1268 sp->f_flag |= ST_NOSUID; 1269 1270 bcopy(sp, buf, sizeof(*buf)); 1271 error = 0; 1272 done: 1273 fdrop(fp); 1274 return (error); 1275 } 1276 1277 /* 1278 * fstatfs_args(int fd, struct statfs *buf) 1279 * 1280 * Get filesystem statistics. 1281 * 1282 * MPALMOSTSAFE 1283 */ 1284 int 1285 sys_fstatvfs(struct fstatvfs_args *uap) 1286 { 1287 struct statvfs buf; 1288 int error; 1289 1290 get_mplock(); 1291 error = kern_fstatvfs(uap->fd, &buf); 1292 rel_mplock(); 1293 1294 if (error == 0) 1295 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1296 return (error); 1297 } 1298 1299 /* 1300 * getfsstat_args(struct statfs *buf, long bufsize, int flags) 1301 * 1302 * Get statistics on all filesystems. 1303 */ 1304 1305 struct getfsstat_info { 1306 struct statfs *sfsp; 1307 long count; 1308 long maxcount; 1309 int error; 1310 int flags; 1311 struct thread *td; 1312 }; 1313 1314 static int getfsstat_callback(struct mount *, void *); 1315 1316 /* 1317 * MPALMOSTSAFE 1318 */ 1319 int 1320 sys_getfsstat(struct getfsstat_args *uap) 1321 { 1322 struct thread *td = curthread; 1323 struct getfsstat_info info; 1324 1325 bzero(&info, sizeof(info)); 1326 1327 info.maxcount = uap->bufsize / sizeof(struct statfs); 1328 info.sfsp = uap->buf; 1329 info.count = 0; 1330 info.flags = uap->flags; 1331 info.td = td; 1332 1333 get_mplock(); 1334 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD); 1335 rel_mplock(); 1336 if (info.sfsp && info.count > info.maxcount) 1337 uap->sysmsg_result = info.maxcount; 1338 else 1339 uap->sysmsg_result = info.count; 1340 return (info.error); 1341 } 1342 1343 static int 1344 getfsstat_callback(struct mount *mp, void *data) 1345 { 1346 struct getfsstat_info *info = data; 1347 struct statfs *sp; 1348 char *freepath; 1349 char *fullpath; 1350 int error; 1351 1352 if (info->sfsp && info->count < info->maxcount) { 1353 if (info->td->td_proc && 1354 !chroot_visible_mnt(mp, info->td->td_proc)) { 1355 return(0); 1356 } 1357 sp = &mp->mnt_stat; 1358 1359 /* 1360 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1361 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1362 * overrides MNT_WAIT. 1363 */ 1364 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1365 (info->flags & MNT_WAIT)) && 1366 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1367 return(0); 1368 } 1369 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1370 1371 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1372 if (error) { 1373 info->error = error; 1374 return(-1); 1375 } 1376 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1377 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1378 kfree(freepath, M_TEMP); 1379 1380 error = copyout(sp, info->sfsp, sizeof(*sp)); 1381 if (error) { 1382 info->error = error; 1383 return (-1); 1384 } 1385 ++info->sfsp; 1386 } 1387 info->count++; 1388 return(0); 1389 } 1390 1391 /* 1392 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf, 1393 long bufsize, int flags) 1394 * 1395 * Get statistics on all filesystems. 1396 */ 1397 1398 struct getvfsstat_info { 1399 struct statfs *sfsp; 1400 struct statvfs *vsfsp; 1401 long count; 1402 long maxcount; 1403 int error; 1404 int flags; 1405 struct thread *td; 1406 }; 1407 1408 static int getvfsstat_callback(struct mount *, void *); 1409 1410 /* 1411 * MPALMOSTSAFE 1412 */ 1413 int 1414 sys_getvfsstat(struct getvfsstat_args *uap) 1415 { 1416 struct thread *td = curthread; 1417 struct getvfsstat_info info; 1418 1419 bzero(&info, sizeof(info)); 1420 1421 info.maxcount = uap->vbufsize / sizeof(struct statvfs); 1422 info.sfsp = uap->buf; 1423 info.vsfsp = uap->vbuf; 1424 info.count = 0; 1425 info.flags = uap->flags; 1426 info.td = td; 1427 1428 get_mplock(); 1429 mountlist_scan(getvfsstat_callback, &info, MNTSCAN_FORWARD); 1430 if (info.vsfsp && info.count > info.maxcount) 1431 uap->sysmsg_result = info.maxcount; 1432 else 1433 uap->sysmsg_result = info.count; 1434 rel_mplock(); 1435 return (info.error); 1436 } 1437 1438 static int 1439 getvfsstat_callback(struct mount *mp, void *data) 1440 { 1441 struct getvfsstat_info *info = data; 1442 struct statfs *sp; 1443 struct statvfs *vsp; 1444 char *freepath; 1445 char *fullpath; 1446 int error; 1447 1448 if (info->vsfsp && info->count < info->maxcount) { 1449 if (info->td->td_proc && 1450 !chroot_visible_mnt(mp, info->td->td_proc)) { 1451 return(0); 1452 } 1453 sp = &mp->mnt_stat; 1454 vsp = &mp->mnt_vstat; 1455 1456 /* 1457 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1458 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1459 * overrides MNT_WAIT. 1460 */ 1461 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1462 (info->flags & MNT_WAIT)) && 1463 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1464 return(0); 1465 } 1466 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1467 1468 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1469 (info->flags & MNT_WAIT)) && 1470 (error = VFS_STATVFS(mp, vsp, info->td->td_ucred))) { 1471 return(0); 1472 } 1473 vsp->f_flag = 0; 1474 if (mp->mnt_flag & MNT_RDONLY) 1475 vsp->f_flag |= ST_RDONLY; 1476 if (mp->mnt_flag & MNT_NOSUID) 1477 vsp->f_flag |= ST_NOSUID; 1478 1479 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1480 if (error) { 1481 info->error = error; 1482 return(-1); 1483 } 1484 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1485 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1486 kfree(freepath, M_TEMP); 1487 1488 error = copyout(sp, info->sfsp, sizeof(*sp)); 1489 if (error == 0) 1490 error = copyout(vsp, info->vsfsp, sizeof(*vsp)); 1491 if (error) { 1492 info->error = error; 1493 return (-1); 1494 } 1495 ++info->sfsp; 1496 ++info->vsfsp; 1497 } 1498 info->count++; 1499 return(0); 1500 } 1501 1502 1503 /* 1504 * fchdir_args(int fd) 1505 * 1506 * Change current working directory to a given file descriptor. 1507 * 1508 * MPALMOSTSAFE 1509 */ 1510 int 1511 sys_fchdir(struct fchdir_args *uap) 1512 { 1513 struct thread *td = curthread; 1514 struct proc *p = td->td_proc; 1515 struct filedesc *fdp = p->p_fd; 1516 struct vnode *vp, *ovp; 1517 struct mount *mp; 1518 struct file *fp; 1519 struct nchandle nch, onch, tnch; 1520 int error; 1521 1522 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0) 1523 return (error); 1524 get_mplock(); 1525 vp = (struct vnode *)fp->f_data; 1526 vref(vp); 1527 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1528 if (fp->f_nchandle.ncp == NULL) 1529 error = ENOTDIR; 1530 else 1531 error = checkvp_chdir(vp, td); 1532 if (error) { 1533 vput(vp); 1534 goto done; 1535 } 1536 cache_copy(&fp->f_nchandle, &nch); 1537 1538 /* 1539 * If the ncp has become a mount point, traverse through 1540 * the mount point. 1541 */ 1542 1543 while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) && 1544 (mp = cache_findmount(&nch)) != NULL 1545 ) { 1546 error = nlookup_mp(mp, &tnch); 1547 if (error == 0) { 1548 cache_unlock(&tnch); /* leave ref intact */ 1549 vput(vp); 1550 vp = tnch.ncp->nc_vp; 1551 error = vget(vp, LK_SHARED); 1552 KKASSERT(error == 0); 1553 cache_drop(&nch); 1554 nch = tnch; 1555 } 1556 } 1557 if (error == 0) { 1558 ovp = fdp->fd_cdir; 1559 onch = fdp->fd_ncdir; 1560 vn_unlock(vp); /* leave ref intact */ 1561 fdp->fd_cdir = vp; 1562 fdp->fd_ncdir = nch; 1563 cache_drop(&onch); 1564 vrele(ovp); 1565 } else { 1566 cache_drop(&nch); 1567 vput(vp); 1568 } 1569 fdrop(fp); 1570 done: 1571 rel_mplock(); 1572 return (error); 1573 } 1574 1575 int 1576 kern_chdir(struct nlookupdata *nd) 1577 { 1578 struct thread *td = curthread; 1579 struct proc *p = td->td_proc; 1580 struct filedesc *fdp = p->p_fd; 1581 struct vnode *vp, *ovp; 1582 struct nchandle onch; 1583 int error; 1584 1585 if ((error = nlookup(nd)) != 0) 1586 return (error); 1587 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 1588 return (ENOENT); 1589 if ((error = vget(vp, LK_SHARED)) != 0) 1590 return (error); 1591 1592 error = checkvp_chdir(vp, td); 1593 vn_unlock(vp); 1594 if (error == 0) { 1595 ovp = fdp->fd_cdir; 1596 onch = fdp->fd_ncdir; 1597 cache_unlock(&nd->nl_nch); /* leave reference intact */ 1598 fdp->fd_ncdir = nd->nl_nch; 1599 fdp->fd_cdir = vp; 1600 cache_drop(&onch); 1601 vrele(ovp); 1602 cache_zero(&nd->nl_nch); 1603 } else { 1604 vrele(vp); 1605 } 1606 return (error); 1607 } 1608 1609 /* 1610 * chdir_args(char *path) 1611 * 1612 * Change current working directory (``.''). 1613 * 1614 * MPALMOSTSAFE 1615 */ 1616 int 1617 sys_chdir(struct chdir_args *uap) 1618 { 1619 struct nlookupdata nd; 1620 int error; 1621 1622 get_mplock(); 1623 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1624 if (error == 0) 1625 error = kern_chdir(&nd); 1626 nlookup_done(&nd); 1627 rel_mplock(); 1628 return (error); 1629 } 1630 1631 /* 1632 * Helper function for raised chroot(2) security function: Refuse if 1633 * any filedescriptors are open directories. 1634 */ 1635 static int 1636 chroot_refuse_vdir_fds(struct filedesc *fdp) 1637 { 1638 struct vnode *vp; 1639 struct file *fp; 1640 int error; 1641 int fd; 1642 1643 for (fd = 0; fd < fdp->fd_nfiles ; fd++) { 1644 if ((error = holdvnode(fdp, fd, &fp)) != 0) 1645 continue; 1646 vp = (struct vnode *)fp->f_data; 1647 if (vp->v_type != VDIR) { 1648 fdrop(fp); 1649 continue; 1650 } 1651 fdrop(fp); 1652 return(EPERM); 1653 } 1654 return (0); 1655 } 1656 1657 /* 1658 * This sysctl determines if we will allow a process to chroot(2) if it 1659 * has a directory open: 1660 * 0: disallowed for all processes. 1661 * 1: allowed for processes that were not already chroot(2)'ed. 1662 * 2: allowed for all processes. 1663 */ 1664 1665 static int chroot_allow_open_directories = 1; 1666 1667 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW, 1668 &chroot_allow_open_directories, 0, ""); 1669 1670 /* 1671 * chroot to the specified namecache entry. We obtain the vp from the 1672 * namecache data. The passed ncp must be locked and referenced and will 1673 * remain locked and referenced on return. 1674 */ 1675 int 1676 kern_chroot(struct nchandle *nch) 1677 { 1678 struct thread *td = curthread; 1679 struct proc *p = td->td_proc; 1680 struct filedesc *fdp = p->p_fd; 1681 struct vnode *vp; 1682 int error; 1683 1684 /* 1685 * Only privileged user can chroot 1686 */ 1687 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1688 if (error) 1689 return (error); 1690 1691 /* 1692 * Disallow open directory descriptors (fchdir() breakouts). 1693 */ 1694 if (chroot_allow_open_directories == 0 || 1695 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) { 1696 if ((error = chroot_refuse_vdir_fds(fdp)) != 0) 1697 return (error); 1698 } 1699 if ((vp = nch->ncp->nc_vp) == NULL) 1700 return (ENOENT); 1701 1702 if ((error = vget(vp, LK_SHARED)) != 0) 1703 return (error); 1704 1705 /* 1706 * Check the validity of vp as a directory to change to and 1707 * associate it with rdir/jdir. 1708 */ 1709 error = checkvp_chdir(vp, td); 1710 vn_unlock(vp); /* leave reference intact */ 1711 if (error == 0) { 1712 vrele(fdp->fd_rdir); 1713 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */ 1714 cache_drop(&fdp->fd_nrdir); 1715 cache_copy(nch, &fdp->fd_nrdir); 1716 if (fdp->fd_jdir == NULL) { 1717 fdp->fd_jdir = vp; 1718 vref(fdp->fd_jdir); 1719 cache_copy(nch, &fdp->fd_njdir); 1720 } 1721 } else { 1722 vrele(vp); 1723 } 1724 return (error); 1725 } 1726 1727 /* 1728 * chroot_args(char *path) 1729 * 1730 * Change notion of root (``/'') directory. 1731 * 1732 * MPALMOSTSAFE 1733 */ 1734 int 1735 sys_chroot(struct chroot_args *uap) 1736 { 1737 struct thread *td __debugvar = curthread; 1738 struct nlookupdata nd; 1739 int error; 1740 1741 KKASSERT(td->td_proc); 1742 get_mplock(); 1743 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1744 if (error == 0) { 1745 nd.nl_flags |= NLC_EXEC; 1746 error = nlookup(&nd); 1747 if (error == 0) 1748 error = kern_chroot(&nd.nl_nch); 1749 } 1750 nlookup_done(&nd); 1751 rel_mplock(); 1752 return(error); 1753 } 1754 1755 int 1756 sys_chroot_kernel(struct chroot_kernel_args *uap) 1757 { 1758 struct thread *td = curthread; 1759 struct nlookupdata nd; 1760 struct nchandle *nch; 1761 struct vnode *vp; 1762 int error; 1763 1764 get_mplock(); 1765 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1766 if (error) 1767 goto error_nond; 1768 1769 error = nlookup(&nd); 1770 if (error) 1771 goto error_out; 1772 1773 nch = &nd.nl_nch; 1774 1775 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1776 if (error) 1777 goto error_out; 1778 1779 if ((vp = nch->ncp->nc_vp) == NULL) { 1780 error = ENOENT; 1781 goto error_out; 1782 } 1783 1784 if ((error = cache_vref(nch, nd.nl_cred, &vp)) != 0) 1785 goto error_out; 1786 1787 kprintf("chroot_kernel: set new rootnch/rootvnode to %s\n", uap->path); 1788 vfs_cache_setroot(vp, cache_hold(nch)); 1789 1790 error_out: 1791 nlookup_done(&nd); 1792 error_nond: 1793 rel_mplock(); 1794 return(error); 1795 } 1796 1797 /* 1798 * Common routine for chroot and chdir. Given a locked, referenced vnode, 1799 * determine whether it is legal to chdir to the vnode. The vnode's state 1800 * is not changed by this call. 1801 */ 1802 int 1803 checkvp_chdir(struct vnode *vp, struct thread *td) 1804 { 1805 int error; 1806 1807 if (vp->v_type != VDIR) 1808 error = ENOTDIR; 1809 else 1810 error = VOP_EACCESS(vp, VEXEC, td->td_ucred); 1811 return (error); 1812 } 1813 1814 /* 1815 * MPSAFE 1816 */ 1817 int 1818 kern_open(struct nlookupdata *nd, int oflags, int mode, int *res) 1819 { 1820 struct thread *td = curthread; 1821 struct proc *p = td->td_proc; 1822 struct lwp *lp = td->td_lwp; 1823 struct filedesc *fdp = p->p_fd; 1824 int cmode, flags; 1825 struct file *nfp; 1826 struct file *fp; 1827 struct vnode *vp; 1828 int type, indx, error; 1829 struct flock lf; 1830 1831 if ((oflags & O_ACCMODE) == O_ACCMODE) 1832 return (EINVAL); 1833 flags = FFLAGS(oflags); 1834 error = falloc(lp, &nfp, NULL); 1835 if (error) 1836 return (error); 1837 fp = nfp; 1838 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; 1839 1840 /* 1841 * XXX p_dupfd is a real mess. It allows a device to return a 1842 * file descriptor to be duplicated rather then doing the open 1843 * itself. 1844 */ 1845 lp->lwp_dupfd = -1; 1846 1847 /* 1848 * Call vn_open() to do the lookup and assign the vnode to the 1849 * file pointer. vn_open() does not change the ref count on fp 1850 * and the vnode, on success, will be inherited by the file pointer 1851 * and unlocked. 1852 */ 1853 nd->nl_flags |= NLC_LOCKVP; 1854 error = vn_open(nd, fp, flags, cmode); 1855 nlookup_done(nd); 1856 if (error) { 1857 /* 1858 * handle special fdopen() case. bleh. dupfdopen() is 1859 * responsible for dropping the old contents of ofiles[indx] 1860 * if it succeeds. 1861 * 1862 * Note that fsetfd() will add a ref to fp which represents 1863 * the fd_files[] assignment. We must still drop our 1864 * reference. 1865 */ 1866 if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) { 1867 if (fdalloc(p, 0, &indx) == 0) { 1868 error = dupfdopen(fdp, indx, lp->lwp_dupfd, flags, error); 1869 if (error == 0) { 1870 *res = indx; 1871 fdrop(fp); /* our ref */ 1872 return (0); 1873 } 1874 fsetfd(fdp, NULL, indx); 1875 } 1876 } 1877 fdrop(fp); /* our ref */ 1878 if (error == ERESTART) 1879 error = EINTR; 1880 return (error); 1881 } 1882 1883 /* 1884 * ref the vnode for ourselves so it can't be ripped out from under 1885 * is. XXX need an ND flag to request that the vnode be returned 1886 * anyway. 1887 * 1888 * Reserve a file descriptor but do not assign it until the open 1889 * succeeds. 1890 */ 1891 vp = (struct vnode *)fp->f_data; 1892 vref(vp); 1893 if ((error = fdalloc(p, 0, &indx)) != 0) { 1894 fdrop(fp); 1895 vrele(vp); 1896 return (error); 1897 } 1898 1899 /* 1900 * If no error occurs the vp will have been assigned to the file 1901 * pointer. 1902 */ 1903 lp->lwp_dupfd = 0; 1904 1905 if (flags & (O_EXLOCK | O_SHLOCK)) { 1906 lf.l_whence = SEEK_SET; 1907 lf.l_start = 0; 1908 lf.l_len = 0; 1909 if (flags & O_EXLOCK) 1910 lf.l_type = F_WRLCK; 1911 else 1912 lf.l_type = F_RDLCK; 1913 if (flags & FNONBLOCK) 1914 type = 0; 1915 else 1916 type = F_WAIT; 1917 1918 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 1919 /* 1920 * lock request failed. Clean up the reserved 1921 * descriptor. 1922 */ 1923 vrele(vp); 1924 fsetfd(fdp, NULL, indx); 1925 fdrop(fp); 1926 return (error); 1927 } 1928 fp->f_flag |= FHASLOCK; 1929 } 1930 #if 0 1931 /* 1932 * Assert that all regular file vnodes were created with a object. 1933 */ 1934 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 1935 ("open: regular file has no backing object after vn_open")); 1936 #endif 1937 1938 vrele(vp); 1939 1940 /* 1941 * release our private reference, leaving the one associated with the 1942 * descriptor table intact. 1943 */ 1944 fsetfd(fdp, fp, indx); 1945 fdrop(fp); 1946 *res = indx; 1947 return (0); 1948 } 1949 1950 /* 1951 * open_args(char *path, int flags, int mode) 1952 * 1953 * Check permissions, allocate an open file structure, 1954 * and call the device open routine if any. 1955 * 1956 * MPALMOSTSAFE 1957 */ 1958 int 1959 sys_open(struct open_args *uap) 1960 { 1961 CACHE_MPLOCK_DECLARE; 1962 struct nlookupdata nd; 1963 int error; 1964 1965 CACHE_GETMPLOCK1(); 1966 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 1967 if (error == 0) { 1968 error = kern_open(&nd, uap->flags, 1969 uap->mode, &uap->sysmsg_result); 1970 } 1971 nlookup_done(&nd); 1972 CACHE_RELMPLOCK(); 1973 return (error); 1974 } 1975 1976 /* 1977 * openat_args(int fd, char *path, int flags, int mode) 1978 * 1979 * MPALMOSTSAFE 1980 */ 1981 int 1982 sys_openat(struct openat_args *uap) 1983 { 1984 CACHE_MPLOCK_DECLARE; 1985 struct nlookupdata nd; 1986 int error; 1987 struct file *fp; 1988 1989 CACHE_GETMPLOCK1(); 1990 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 1991 if (error == 0) { 1992 error = kern_open(&nd, uap->flags, uap->mode, 1993 &uap->sysmsg_result); 1994 } 1995 nlookup_done_at(&nd, fp); 1996 CACHE_RELMPLOCK(); 1997 return (error); 1998 } 1999 2000 int 2001 kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor) 2002 { 2003 struct thread *td = curthread; 2004 struct proc *p = td->td_proc; 2005 struct vnode *vp; 2006 struct vattr vattr; 2007 int error; 2008 int whiteout = 0; 2009 2010 KKASSERT(p); 2011 2012 VATTR_NULL(&vattr); 2013 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2014 vattr.va_rmajor = rmajor; 2015 vattr.va_rminor = rminor; 2016 2017 switch (mode & S_IFMT) { 2018 case S_IFMT: /* used by badsect to flag bad sectors */ 2019 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_BAD, 0); 2020 vattr.va_type = VBAD; 2021 break; 2022 case S_IFCHR: 2023 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2024 vattr.va_type = VCHR; 2025 break; 2026 case S_IFBLK: 2027 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2028 vattr.va_type = VBLK; 2029 break; 2030 case S_IFWHT: 2031 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_WHT, 0); 2032 whiteout = 1; 2033 break; 2034 case S_IFDIR: /* special directories support for HAMMER */ 2035 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_DIR, 0); 2036 vattr.va_type = VDIR; 2037 break; 2038 default: 2039 error = EINVAL; 2040 break; 2041 } 2042 2043 if (error) 2044 return (error); 2045 2046 bwillinode(1); 2047 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2048 if ((error = nlookup(nd)) != 0) 2049 return (error); 2050 if (nd->nl_nch.ncp->nc_vp) 2051 return (EEXIST); 2052 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2053 return (error); 2054 2055 if (whiteout) { 2056 error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp, 2057 nd->nl_cred, NAMEI_CREATE); 2058 } else { 2059 vp = NULL; 2060 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, 2061 &vp, nd->nl_cred, &vattr); 2062 if (error == 0) 2063 vput(vp); 2064 } 2065 return (error); 2066 } 2067 2068 /* 2069 * mknod_args(char *path, int mode, int dev) 2070 * 2071 * Create a special file. 2072 * 2073 * MPALMOSTSAFE 2074 */ 2075 int 2076 sys_mknod(struct mknod_args *uap) 2077 { 2078 struct nlookupdata nd; 2079 int error; 2080 2081 get_mplock(); 2082 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2083 if (error == 0) { 2084 error = kern_mknod(&nd, uap->mode, 2085 umajor(uap->dev), uminor(uap->dev)); 2086 } 2087 nlookup_done(&nd); 2088 rel_mplock(); 2089 return (error); 2090 } 2091 2092 int 2093 kern_mkfifo(struct nlookupdata *nd, int mode) 2094 { 2095 struct thread *td = curthread; 2096 struct proc *p = td->td_proc; 2097 struct vattr vattr; 2098 struct vnode *vp; 2099 int error; 2100 2101 bwillinode(1); 2102 2103 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2104 if ((error = nlookup(nd)) != 0) 2105 return (error); 2106 if (nd->nl_nch.ncp->nc_vp) 2107 return (EEXIST); 2108 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2109 return (error); 2110 2111 VATTR_NULL(&vattr); 2112 vattr.va_type = VFIFO; 2113 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2114 vp = NULL; 2115 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr); 2116 if (error == 0) 2117 vput(vp); 2118 return (error); 2119 } 2120 2121 /* 2122 * mkfifo_args(char *path, int mode) 2123 * 2124 * Create a named pipe. 2125 * 2126 * MPALMOSTSAFE 2127 */ 2128 int 2129 sys_mkfifo(struct mkfifo_args *uap) 2130 { 2131 struct nlookupdata nd; 2132 int error; 2133 2134 get_mplock(); 2135 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2136 if (error == 0) 2137 error = kern_mkfifo(&nd, uap->mode); 2138 nlookup_done(&nd); 2139 rel_mplock(); 2140 return (error); 2141 } 2142 2143 static int hardlink_check_uid = 0; 2144 SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW, 2145 &hardlink_check_uid, 0, 2146 "Unprivileged processes cannot create hard links to files owned by other " 2147 "users"); 2148 static int hardlink_check_gid = 0; 2149 SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW, 2150 &hardlink_check_gid, 0, 2151 "Unprivileged processes cannot create hard links to files owned by other " 2152 "groups"); 2153 2154 static int 2155 can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred) 2156 { 2157 struct vattr va; 2158 int error; 2159 2160 /* 2161 * Shortcut if disabled 2162 */ 2163 if (hardlink_check_uid == 0 && hardlink_check_gid == 0) 2164 return (0); 2165 2166 /* 2167 * Privileged user can always hardlink 2168 */ 2169 if (priv_check_cred(cred, PRIV_VFS_LINK, 0) == 0) 2170 return (0); 2171 2172 /* 2173 * Otherwise only if the originating file is owned by the 2174 * same user or group. Note that any group is allowed if 2175 * the file is owned by the caller. 2176 */ 2177 error = VOP_GETATTR(vp, &va); 2178 if (error != 0) 2179 return (error); 2180 2181 if (hardlink_check_uid) { 2182 if (cred->cr_uid != va.va_uid) 2183 return (EPERM); 2184 } 2185 2186 if (hardlink_check_gid) { 2187 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred)) 2188 return (EPERM); 2189 } 2190 2191 return (0); 2192 } 2193 2194 int 2195 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd) 2196 { 2197 struct thread *td = curthread; 2198 struct vnode *vp; 2199 int error; 2200 2201 /* 2202 * Lookup the source and obtained a locked vnode. 2203 * 2204 * You may only hardlink a file which you have write permission 2205 * on or which you own. 2206 * 2207 * XXX relookup on vget failure / race ? 2208 */ 2209 bwillinode(1); 2210 nd->nl_flags |= NLC_WRITE | NLC_OWN | NLC_HLINK; 2211 if ((error = nlookup(nd)) != 0) 2212 return (error); 2213 vp = nd->nl_nch.ncp->nc_vp; 2214 KKASSERT(vp != NULL); 2215 if (vp->v_type == VDIR) 2216 return (EPERM); /* POSIX */ 2217 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2218 return (error); 2219 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) 2220 return (error); 2221 2222 /* 2223 * Unlock the source so we can lookup the target without deadlocking 2224 * (XXX vp is locked already, possible other deadlock?). The target 2225 * must not exist. 2226 */ 2227 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED); 2228 nd->nl_flags &= ~NLC_NCPISLOCKED; 2229 cache_unlock(&nd->nl_nch); 2230 vn_unlock(vp); 2231 2232 linknd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2233 if ((error = nlookup(linknd)) != 0) { 2234 vrele(vp); 2235 return (error); 2236 } 2237 if (linknd->nl_nch.ncp->nc_vp) { 2238 vrele(vp); 2239 return (EEXIST); 2240 } 2241 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 2242 vrele(vp); 2243 return (error); 2244 } 2245 2246 /* 2247 * Finally run the new API VOP. 2248 */ 2249 error = can_hardlink(vp, td, td->td_ucred); 2250 if (error == 0) { 2251 error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp, 2252 vp, linknd->nl_cred); 2253 } 2254 vput(vp); 2255 return (error); 2256 } 2257 2258 /* 2259 * link_args(char *path, char *link) 2260 * 2261 * Make a hard file link. 2262 * 2263 * MPALMOSTSAFE 2264 */ 2265 int 2266 sys_link(struct link_args *uap) 2267 { 2268 struct nlookupdata nd, linknd; 2269 int error; 2270 2271 get_mplock(); 2272 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2273 if (error == 0) { 2274 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0); 2275 if (error == 0) 2276 error = kern_link(&nd, &linknd); 2277 nlookup_done(&linknd); 2278 } 2279 nlookup_done(&nd); 2280 rel_mplock(); 2281 return (error); 2282 } 2283 2284 int 2285 kern_symlink(struct nlookupdata *nd, char *path, int mode) 2286 { 2287 struct vattr vattr; 2288 struct vnode *vp; 2289 struct vnode *dvp; 2290 int error; 2291 2292 bwillinode(1); 2293 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2294 if ((error = nlookup(nd)) != 0) 2295 return (error); 2296 if (nd->nl_nch.ncp->nc_vp) 2297 return (EEXIST); 2298 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2299 return (error); 2300 dvp = nd->nl_dvp; 2301 VATTR_NULL(&vattr); 2302 vattr.va_mode = mode; 2303 error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path); 2304 if (error == 0) 2305 vput(vp); 2306 return (error); 2307 } 2308 2309 /* 2310 * symlink(char *path, char *link) 2311 * 2312 * Make a symbolic link. 2313 * 2314 * MPALMOSTSAFE 2315 */ 2316 int 2317 sys_symlink(struct symlink_args *uap) 2318 { 2319 struct thread *td = curthread; 2320 struct nlookupdata nd; 2321 char *path; 2322 int error; 2323 int mode; 2324 2325 path = objcache_get(namei_oc, M_WAITOK); 2326 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 2327 if (error == 0) { 2328 get_mplock(); 2329 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0); 2330 if (error == 0) { 2331 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2332 error = kern_symlink(&nd, path, mode); 2333 } 2334 nlookup_done(&nd); 2335 rel_mplock(); 2336 } 2337 objcache_put(namei_oc, path); 2338 return (error); 2339 } 2340 2341 /* 2342 * undelete_args(char *path) 2343 * 2344 * Delete a whiteout from the filesystem. 2345 * 2346 * MPALMOSTSAFE 2347 */ 2348 int 2349 sys_undelete(struct undelete_args *uap) 2350 { 2351 struct nlookupdata nd; 2352 int error; 2353 2354 get_mplock(); 2355 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2356 bwillinode(1); 2357 nd.nl_flags |= NLC_DELETE | NLC_REFDVP; 2358 if (error == 0) 2359 error = nlookup(&nd); 2360 if (error == 0) 2361 error = ncp_writechk(&nd.nl_nch); 2362 if (error == 0) { 2363 error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred, 2364 NAMEI_DELETE); 2365 } 2366 nlookup_done(&nd); 2367 rel_mplock(); 2368 return (error); 2369 } 2370 2371 int 2372 kern_unlink(struct nlookupdata *nd) 2373 { 2374 int error; 2375 2376 bwillinode(1); 2377 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 2378 if ((error = nlookup(nd)) != 0) 2379 return (error); 2380 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2381 return (error); 2382 error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 2383 return (error); 2384 } 2385 2386 /* 2387 * unlink_args(char *path) 2388 * 2389 * Delete a name from the filesystem. 2390 * 2391 * MPALMOSTSAFE 2392 */ 2393 int 2394 sys_unlink(struct unlink_args *uap) 2395 { 2396 struct nlookupdata nd; 2397 int error; 2398 2399 get_mplock(); 2400 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2401 if (error == 0) 2402 error = kern_unlink(&nd); 2403 nlookup_done(&nd); 2404 rel_mplock(); 2405 return (error); 2406 } 2407 2408 2409 /* 2410 * unlinkat_args(int fd, char *path, int flags) 2411 * 2412 * Delete the file or directory entry pointed to by fd/path. 2413 * 2414 * MPALMOSTSAFE 2415 */ 2416 int 2417 sys_unlinkat(struct unlinkat_args *uap) 2418 { 2419 struct nlookupdata nd; 2420 struct file *fp; 2421 int error; 2422 2423 if (uap->flags & ~AT_REMOVEDIR) 2424 return (EINVAL); 2425 2426 get_mplock(); 2427 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2428 if (error == 0) { 2429 if (uap->flags & AT_REMOVEDIR) 2430 error = kern_rmdir(&nd); 2431 else 2432 error = kern_unlink(&nd); 2433 } 2434 nlookup_done_at(&nd, fp); 2435 rel_mplock(); 2436 return (error); 2437 } 2438 2439 /* 2440 * MPALMOSTSAFE 2441 */ 2442 int 2443 kern_lseek(int fd, off_t offset, int whence, off_t *res) 2444 { 2445 struct thread *td = curthread; 2446 struct proc *p = td->td_proc; 2447 struct file *fp; 2448 struct vnode *vp; 2449 struct vattr vattr; 2450 off_t new_offset; 2451 int error; 2452 2453 fp = holdfp(p->p_fd, fd, -1); 2454 if (fp == NULL) 2455 return (EBADF); 2456 if (fp->f_type != DTYPE_VNODE) { 2457 error = ESPIPE; 2458 goto done; 2459 } 2460 vp = (struct vnode *)fp->f_data; 2461 2462 switch (whence) { 2463 case L_INCR: 2464 spin_lock_wr(&fp->f_spin); 2465 new_offset = fp->f_offset + offset; 2466 error = 0; 2467 break; 2468 case L_XTND: 2469 get_mplock(); 2470 error = VOP_GETATTR(vp, &vattr); 2471 rel_mplock(); 2472 spin_lock_wr(&fp->f_spin); 2473 new_offset = offset + vattr.va_size; 2474 break; 2475 case L_SET: 2476 new_offset = offset; 2477 error = 0; 2478 spin_lock_wr(&fp->f_spin); 2479 break; 2480 default: 2481 new_offset = 0; 2482 error = EINVAL; 2483 spin_lock_wr(&fp->f_spin); 2484 break; 2485 } 2486 2487 /* 2488 * Validate the seek position. Negative offsets are not allowed 2489 * for regular files or directories. 2490 * 2491 * Normally we would also not want to allow negative offsets for 2492 * character and block-special devices. However kvm addresses 2493 * on 64 bit architectures might appear to be negative and must 2494 * be allowed. 2495 */ 2496 if (error == 0) { 2497 if (new_offset < 0 && 2498 (vp->v_type == VREG || vp->v_type == VDIR)) { 2499 error = EINVAL; 2500 } else { 2501 fp->f_offset = new_offset; 2502 } 2503 } 2504 *res = fp->f_offset; 2505 spin_unlock_wr(&fp->f_spin); 2506 done: 2507 fdrop(fp); 2508 return (error); 2509 } 2510 2511 /* 2512 * lseek_args(int fd, int pad, off_t offset, int whence) 2513 * 2514 * Reposition read/write file offset. 2515 * 2516 * MPSAFE 2517 */ 2518 int 2519 sys_lseek(struct lseek_args *uap) 2520 { 2521 int error; 2522 2523 error = kern_lseek(uap->fd, uap->offset, uap->whence, 2524 &uap->sysmsg_offset); 2525 2526 return (error); 2527 } 2528 2529 /* 2530 * Check if current process can access given file. amode is a bitmask of *_OK 2531 * access bits. flags is a bitmask of AT_* flags. 2532 */ 2533 int 2534 kern_access(struct nlookupdata *nd, int amode, int flags) 2535 { 2536 struct vnode *vp; 2537 int error, mode; 2538 2539 if (flags & ~AT_EACCESS) 2540 return (EINVAL); 2541 if ((error = nlookup(nd)) != 0) 2542 return (error); 2543 retry: 2544 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2545 if (error) 2546 return (error); 2547 2548 /* Flags == 0 means only check for existence. */ 2549 if (amode) { 2550 mode = 0; 2551 if (amode & R_OK) 2552 mode |= VREAD; 2553 if (amode & W_OK) 2554 mode |= VWRITE; 2555 if (amode & X_OK) 2556 mode |= VEXEC; 2557 if ((mode & VWRITE) == 0 || 2558 (error = vn_writechk(vp, &nd->nl_nch)) == 0) 2559 error = VOP_ACCESS_FLAGS(vp, mode, flags, nd->nl_cred); 2560 2561 /* 2562 * If the file handle is stale we have to re-resolve the 2563 * entry. This is a hack at the moment. 2564 */ 2565 if (error == ESTALE) { 2566 vput(vp); 2567 cache_setunresolved(&nd->nl_nch); 2568 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2569 if (error == 0) { 2570 vp = NULL; 2571 goto retry; 2572 } 2573 return(error); 2574 } 2575 } 2576 vput(vp); 2577 return (error); 2578 } 2579 2580 /* 2581 * access_args(char *path, int flags) 2582 * 2583 * Check access permissions. 2584 * 2585 * MPALMOSTSAFE 2586 */ 2587 int 2588 sys_access(struct access_args *uap) 2589 { 2590 struct nlookupdata nd; 2591 int error; 2592 2593 get_mplock(); 2594 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2595 if (error == 0) 2596 error = kern_access(&nd, uap->flags, 0); 2597 nlookup_done(&nd); 2598 rel_mplock(); 2599 return (error); 2600 } 2601 2602 2603 /* 2604 * faccessat_args(int fd, char *path, int amode, int flags) 2605 * 2606 * Check access permissions. 2607 * 2608 * MPALMOSTSAFE 2609 */ 2610 int 2611 sys_faccessat(struct faccessat_args *uap) 2612 { 2613 struct nlookupdata nd; 2614 struct file *fp; 2615 int error; 2616 2617 get_mplock(); 2618 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 2619 NLC_FOLLOW); 2620 if (error == 0) 2621 error = kern_access(&nd, uap->amode, uap->flags); 2622 nlookup_done_at(&nd, fp); 2623 rel_mplock(); 2624 return (error); 2625 } 2626 2627 2628 /* 2629 * MPSAFE 2630 */ 2631 int 2632 kern_stat(struct nlookupdata *nd, struct stat *st) 2633 { 2634 int error; 2635 struct vnode *vp; 2636 thread_t td; 2637 2638 if ((error = nlookup(nd)) != 0) 2639 return (error); 2640 again: 2641 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 2642 return (ENOENT); 2643 2644 td = curthread; 2645 if ((error = vget(vp, LK_SHARED)) != 0) 2646 return (error); 2647 error = vn_stat(vp, st, nd->nl_cred); 2648 2649 /* 2650 * If the file handle is stale we have to re-resolve the entry. This 2651 * is a hack at the moment. 2652 */ 2653 if (error == ESTALE) { 2654 vput(vp); 2655 cache_setunresolved(&nd->nl_nch); 2656 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2657 if (error == 0) 2658 goto again; 2659 } else { 2660 vput(vp); 2661 } 2662 return (error); 2663 } 2664 2665 /* 2666 * stat_args(char *path, struct stat *ub) 2667 * 2668 * Get file status; this version follows links. 2669 * 2670 * MPSAFE 2671 */ 2672 int 2673 sys_stat(struct stat_args *uap) 2674 { 2675 CACHE_MPLOCK_DECLARE; 2676 struct nlookupdata nd; 2677 struct stat st; 2678 int error; 2679 2680 CACHE_GETMPLOCK1(); 2681 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2682 if (error == 0) { 2683 error = kern_stat(&nd, &st); 2684 if (error == 0) 2685 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2686 } 2687 nlookup_done(&nd); 2688 CACHE_RELMPLOCK(); 2689 return (error); 2690 } 2691 2692 /* 2693 * lstat_args(char *path, struct stat *ub) 2694 * 2695 * Get file status; this version does not follow links. 2696 * 2697 * MPALMOSTSAFE 2698 */ 2699 int 2700 sys_lstat(struct lstat_args *uap) 2701 { 2702 CACHE_MPLOCK_DECLARE; 2703 struct nlookupdata nd; 2704 struct stat st; 2705 int error; 2706 2707 CACHE_GETMPLOCK1(); 2708 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2709 if (error == 0) { 2710 error = kern_stat(&nd, &st); 2711 if (error == 0) 2712 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2713 } 2714 nlookup_done(&nd); 2715 CACHE_RELMPLOCK(); 2716 return (error); 2717 } 2718 2719 /* 2720 * fstatat_args(int fd, char *path, struct stat *sb, int flags) 2721 * 2722 * Get status of file pointed to by fd/path. 2723 * 2724 * MPALMOSTSAFE 2725 */ 2726 int 2727 sys_fstatat(struct fstatat_args *uap) 2728 { 2729 CACHE_MPLOCK_DECLARE; 2730 struct nlookupdata nd; 2731 struct stat st; 2732 int error; 2733 int flags; 2734 struct file *fp; 2735 2736 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 2737 return (EINVAL); 2738 2739 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 2740 2741 CACHE_GETMPLOCK1(); 2742 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 2743 UIO_USERSPACE, flags); 2744 if (error == 0) { 2745 error = kern_stat(&nd, &st); 2746 if (error == 0) 2747 error = copyout(&st, uap->sb, sizeof(*uap->sb)); 2748 } 2749 nlookup_done_at(&nd, fp); 2750 CACHE_RELMPLOCK(); 2751 return (error); 2752 } 2753 2754 /* 2755 * pathconf_Args(char *path, int name) 2756 * 2757 * Get configurable pathname variables. 2758 * 2759 * MPALMOSTSAFE 2760 */ 2761 int 2762 sys_pathconf(struct pathconf_args *uap) 2763 { 2764 struct nlookupdata nd; 2765 struct vnode *vp; 2766 int error; 2767 2768 vp = NULL; 2769 get_mplock(); 2770 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2771 if (error == 0) 2772 error = nlookup(&nd); 2773 if (error == 0) 2774 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 2775 nlookup_done(&nd); 2776 if (error == 0) { 2777 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg); 2778 vput(vp); 2779 } 2780 rel_mplock(); 2781 return (error); 2782 } 2783 2784 /* 2785 * XXX: daver 2786 * kern_readlink isn't properly split yet. There is a copyin burried 2787 * in VOP_READLINK(). 2788 */ 2789 int 2790 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res) 2791 { 2792 struct thread *td = curthread; 2793 struct vnode *vp; 2794 struct iovec aiov; 2795 struct uio auio; 2796 int error; 2797 2798 if ((error = nlookup(nd)) != 0) 2799 return (error); 2800 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2801 if (error) 2802 return (error); 2803 if (vp->v_type != VLNK) { 2804 error = EINVAL; 2805 } else { 2806 aiov.iov_base = buf; 2807 aiov.iov_len = count; 2808 auio.uio_iov = &aiov; 2809 auio.uio_iovcnt = 1; 2810 auio.uio_offset = 0; 2811 auio.uio_rw = UIO_READ; 2812 auio.uio_segflg = UIO_USERSPACE; 2813 auio.uio_td = td; 2814 auio.uio_resid = count; 2815 error = VOP_READLINK(vp, &auio, td->td_ucred); 2816 } 2817 vput(vp); 2818 *res = count - auio.uio_resid; 2819 return (error); 2820 } 2821 2822 /* 2823 * readlink_args(char *path, char *buf, int count) 2824 * 2825 * Return target name of a symbolic link. 2826 * 2827 * MPALMOSTSAFE 2828 */ 2829 int 2830 sys_readlink(struct readlink_args *uap) 2831 { 2832 struct nlookupdata nd; 2833 int error; 2834 2835 get_mplock(); 2836 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2837 if (error == 0) { 2838 error = kern_readlink(&nd, uap->buf, uap->count, 2839 &uap->sysmsg_result); 2840 } 2841 nlookup_done(&nd); 2842 rel_mplock(); 2843 return (error); 2844 } 2845 2846 static int 2847 setfflags(struct vnode *vp, int flags) 2848 { 2849 struct thread *td = curthread; 2850 int error; 2851 struct vattr vattr; 2852 2853 /* 2854 * Prevent non-root users from setting flags on devices. When 2855 * a device is reused, users can retain ownership of the device 2856 * if they are allowed to set flags and programs assume that 2857 * chown can't fail when done as root. 2858 */ 2859 if ((vp->v_type == VCHR || vp->v_type == VBLK) && 2860 ((error = priv_check_cred(td->td_ucred, PRIV_VFS_CHFLAGS_DEV, 0)) != 0)) 2861 return (error); 2862 2863 /* 2864 * note: vget is required for any operation that might mod the vnode 2865 * so VINACTIVE is properly cleared. 2866 */ 2867 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2868 VATTR_NULL(&vattr); 2869 vattr.va_flags = flags; 2870 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2871 vput(vp); 2872 } 2873 return (error); 2874 } 2875 2876 /* 2877 * chflags(char *path, int flags) 2878 * 2879 * Change flags of a file given a path name. 2880 * 2881 * MPALMOSTSAFE 2882 */ 2883 int 2884 sys_chflags(struct chflags_args *uap) 2885 { 2886 struct nlookupdata nd; 2887 struct vnode *vp; 2888 int error; 2889 2890 vp = NULL; 2891 get_mplock(); 2892 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2893 if (error == 0) 2894 error = nlookup(&nd); 2895 if (error == 0) 2896 error = ncp_writechk(&nd.nl_nch); 2897 if (error == 0) 2898 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 2899 nlookup_done(&nd); 2900 if (error == 0) { 2901 error = setfflags(vp, uap->flags); 2902 vrele(vp); 2903 } 2904 rel_mplock(); 2905 return (error); 2906 } 2907 2908 /* 2909 * lchflags(char *path, int flags) 2910 * 2911 * Change flags of a file given a path name, but don't follow symlinks. 2912 * 2913 * MPALMOSTSAFE 2914 */ 2915 int 2916 sys_lchflags(struct lchflags_args *uap) 2917 { 2918 struct nlookupdata nd; 2919 struct vnode *vp; 2920 int error; 2921 2922 vp = NULL; 2923 get_mplock(); 2924 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2925 if (error == 0) 2926 error = nlookup(&nd); 2927 if (error == 0) 2928 error = ncp_writechk(&nd.nl_nch); 2929 if (error == 0) 2930 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 2931 nlookup_done(&nd); 2932 if (error == 0) { 2933 error = setfflags(vp, uap->flags); 2934 vrele(vp); 2935 } 2936 rel_mplock(); 2937 return (error); 2938 } 2939 2940 /* 2941 * fchflags_args(int fd, int flags) 2942 * 2943 * Change flags of a file given a file descriptor. 2944 * 2945 * MPALMOSTSAFE 2946 */ 2947 int 2948 sys_fchflags(struct fchflags_args *uap) 2949 { 2950 struct thread *td = curthread; 2951 struct proc *p = td->td_proc; 2952 struct file *fp; 2953 int error; 2954 2955 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 2956 return (error); 2957 get_mplock(); 2958 if (fp->f_nchandle.ncp) 2959 error = ncp_writechk(&fp->f_nchandle); 2960 if (error == 0) 2961 error = setfflags((struct vnode *) fp->f_data, uap->flags); 2962 rel_mplock(); 2963 fdrop(fp); 2964 return (error); 2965 } 2966 2967 static int 2968 setfmode(struct vnode *vp, int mode) 2969 { 2970 struct thread *td = curthread; 2971 int error; 2972 struct vattr vattr; 2973 2974 /* 2975 * note: vget is required for any operation that might mod the vnode 2976 * so VINACTIVE is properly cleared. 2977 */ 2978 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2979 VATTR_NULL(&vattr); 2980 vattr.va_mode = mode & ALLPERMS; 2981 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2982 vput(vp); 2983 } 2984 return error; 2985 } 2986 2987 int 2988 kern_chmod(struct nlookupdata *nd, int mode) 2989 { 2990 struct vnode *vp; 2991 int error; 2992 2993 if ((error = nlookup(nd)) != 0) 2994 return (error); 2995 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 2996 return (error); 2997 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 2998 error = setfmode(vp, mode); 2999 vrele(vp); 3000 return (error); 3001 } 3002 3003 /* 3004 * chmod_args(char *path, int mode) 3005 * 3006 * Change mode of a file given path name. 3007 * 3008 * MPALMOSTSAFE 3009 */ 3010 int 3011 sys_chmod(struct chmod_args *uap) 3012 { 3013 struct nlookupdata nd; 3014 int error; 3015 3016 get_mplock(); 3017 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3018 if (error == 0) 3019 error = kern_chmod(&nd, uap->mode); 3020 nlookup_done(&nd); 3021 rel_mplock(); 3022 return (error); 3023 } 3024 3025 /* 3026 * lchmod_args(char *path, int mode) 3027 * 3028 * Change mode of a file given path name (don't follow links.) 3029 * 3030 * MPALMOSTSAFE 3031 */ 3032 int 3033 sys_lchmod(struct lchmod_args *uap) 3034 { 3035 struct nlookupdata nd; 3036 int error; 3037 3038 get_mplock(); 3039 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3040 if (error == 0) 3041 error = kern_chmod(&nd, uap->mode); 3042 nlookup_done(&nd); 3043 rel_mplock(); 3044 return (error); 3045 } 3046 3047 /* 3048 * fchmod_args(int fd, int mode) 3049 * 3050 * Change mode of a file given a file descriptor. 3051 * 3052 * MPALMOSTSAFE 3053 */ 3054 int 3055 sys_fchmod(struct fchmod_args *uap) 3056 { 3057 struct thread *td = curthread; 3058 struct proc *p = td->td_proc; 3059 struct file *fp; 3060 int error; 3061 3062 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3063 return (error); 3064 get_mplock(); 3065 if (fp->f_nchandle.ncp) 3066 error = ncp_writechk(&fp->f_nchandle); 3067 if (error == 0) 3068 error = setfmode((struct vnode *)fp->f_data, uap->mode); 3069 rel_mplock(); 3070 fdrop(fp); 3071 return (error); 3072 } 3073 3074 /* 3075 * fchmodat_args(char *path, int mode) 3076 * 3077 * Change mode of a file pointed to by fd/path. 3078 * 3079 * MPALMOSTSAFE 3080 */ 3081 int 3082 sys_fchmodat(struct fchmodat_args *uap) 3083 { 3084 struct nlookupdata nd; 3085 struct file *fp; 3086 int error; 3087 int flags; 3088 3089 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3090 return (EINVAL); 3091 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 3092 3093 get_mplock(); 3094 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3095 UIO_USERSPACE, flags); 3096 if (error == 0) 3097 error = kern_chmod(&nd, uap->mode); 3098 nlookup_done_at(&nd, fp); 3099 rel_mplock(); 3100 return (error); 3101 } 3102 3103 static int 3104 setfown(struct vnode *vp, uid_t uid, gid_t gid) 3105 { 3106 struct thread *td = curthread; 3107 int error; 3108 struct vattr vattr; 3109 3110 /* 3111 * note: vget is required for any operation that might mod the vnode 3112 * so VINACTIVE is properly cleared. 3113 */ 3114 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3115 VATTR_NULL(&vattr); 3116 vattr.va_uid = uid; 3117 vattr.va_gid = gid; 3118 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3119 vput(vp); 3120 } 3121 return error; 3122 } 3123 3124 int 3125 kern_chown(struct nlookupdata *nd, int uid, int gid) 3126 { 3127 struct vnode *vp; 3128 int error; 3129 3130 if ((error = nlookup(nd)) != 0) 3131 return (error); 3132 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3133 return (error); 3134 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3135 error = setfown(vp, uid, gid); 3136 vrele(vp); 3137 return (error); 3138 } 3139 3140 /* 3141 * chown(char *path, int uid, int gid) 3142 * 3143 * Set ownership given a path name. 3144 * 3145 * MPALMOSTSAFE 3146 */ 3147 int 3148 sys_chown(struct chown_args *uap) 3149 { 3150 struct nlookupdata nd; 3151 int error; 3152 3153 get_mplock(); 3154 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3155 if (error == 0) 3156 error = kern_chown(&nd, uap->uid, uap->gid); 3157 nlookup_done(&nd); 3158 rel_mplock(); 3159 return (error); 3160 } 3161 3162 /* 3163 * lchown_args(char *path, int uid, int gid) 3164 * 3165 * Set ownership given a path name, do not cross symlinks. 3166 * 3167 * MPALMOSTSAFE 3168 */ 3169 int 3170 sys_lchown(struct lchown_args *uap) 3171 { 3172 struct nlookupdata nd; 3173 int error; 3174 3175 get_mplock(); 3176 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3177 if (error == 0) 3178 error = kern_chown(&nd, uap->uid, uap->gid); 3179 nlookup_done(&nd); 3180 rel_mplock(); 3181 return (error); 3182 } 3183 3184 /* 3185 * fchown_args(int fd, int uid, int gid) 3186 * 3187 * Set ownership given a file descriptor. 3188 * 3189 * MPALMOSTSAFE 3190 */ 3191 int 3192 sys_fchown(struct fchown_args *uap) 3193 { 3194 struct thread *td = curthread; 3195 struct proc *p = td->td_proc; 3196 struct file *fp; 3197 int error; 3198 3199 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3200 return (error); 3201 get_mplock(); 3202 if (fp->f_nchandle.ncp) 3203 error = ncp_writechk(&fp->f_nchandle); 3204 if (error == 0) 3205 error = setfown((struct vnode *)fp->f_data, uap->uid, uap->gid); 3206 rel_mplock(); 3207 fdrop(fp); 3208 return (error); 3209 } 3210 3211 /* 3212 * fchownat(int fd, char *path, int uid, int gid, int flags) 3213 * 3214 * Set ownership of file pointed to by fd/path. 3215 * 3216 * MPALMOSTSAFE 3217 */ 3218 int 3219 sys_fchownat(struct fchownat_args *uap) 3220 { 3221 struct nlookupdata nd; 3222 struct file *fp; 3223 int error; 3224 int flags; 3225 3226 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3227 return (EINVAL); 3228 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 3229 3230 get_mplock(); 3231 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3232 UIO_USERSPACE, flags); 3233 if (error == 0) 3234 error = kern_chown(&nd, uap->uid, uap->gid); 3235 nlookup_done_at(&nd, fp); 3236 rel_mplock(); 3237 return (error); 3238 } 3239 3240 3241 static int 3242 getutimes(const struct timeval *tvp, struct timespec *tsp) 3243 { 3244 struct timeval tv[2]; 3245 3246 if (tvp == NULL) { 3247 microtime(&tv[0]); 3248 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); 3249 tsp[1] = tsp[0]; 3250 } else { 3251 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]); 3252 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]); 3253 } 3254 return 0; 3255 } 3256 3257 static int 3258 setutimes(struct vnode *vp, struct vattr *vattr, 3259 const struct timespec *ts, int nullflag) 3260 { 3261 struct thread *td = curthread; 3262 int error; 3263 3264 VATTR_NULL(vattr); 3265 vattr->va_atime = ts[0]; 3266 vattr->va_mtime = ts[1]; 3267 if (nullflag) 3268 vattr->va_vaflags |= VA_UTIMES_NULL; 3269 error = VOP_SETATTR(vp, vattr, td->td_ucred); 3270 3271 return error; 3272 } 3273 3274 int 3275 kern_utimes(struct nlookupdata *nd, struct timeval *tptr) 3276 { 3277 struct timespec ts[2]; 3278 struct vnode *vp; 3279 struct vattr vattr; 3280 int error; 3281 3282 if ((error = getutimes(tptr, ts)) != 0) 3283 return (error); 3284 3285 /* 3286 * NOTE: utimes() succeeds for the owner even if the file 3287 * is not user-writable. 3288 */ 3289 nd->nl_flags |= NLC_OWN | NLC_WRITE; 3290 3291 if ((error = nlookup(nd)) != 0) 3292 return (error); 3293 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3294 return (error); 3295 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3296 return (error); 3297 3298 /* 3299 * note: vget is required for any operation that might mod the vnode 3300 * so VINACTIVE is properly cleared. 3301 */ 3302 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3303 error = vget(vp, LK_EXCLUSIVE); 3304 if (error == 0) { 3305 error = setutimes(vp, &vattr, ts, (tptr == NULL)); 3306 vput(vp); 3307 } 3308 } 3309 vrele(vp); 3310 return (error); 3311 } 3312 3313 /* 3314 * utimes_args(char *path, struct timeval *tptr) 3315 * 3316 * Set the access and modification times of a file. 3317 * 3318 * MPALMOSTSAFE 3319 */ 3320 int 3321 sys_utimes(struct utimes_args *uap) 3322 { 3323 struct timeval tv[2]; 3324 struct nlookupdata nd; 3325 int error; 3326 3327 if (uap->tptr) { 3328 error = copyin(uap->tptr, tv, sizeof(tv)); 3329 if (error) 3330 return (error); 3331 } 3332 get_mplock(); 3333 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3334 if (error == 0) 3335 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3336 nlookup_done(&nd); 3337 rel_mplock(); 3338 return (error); 3339 } 3340 3341 /* 3342 * lutimes_args(char *path, struct timeval *tptr) 3343 * 3344 * Set the access and modification times of a file. 3345 * 3346 * MPALMOSTSAFE 3347 */ 3348 int 3349 sys_lutimes(struct lutimes_args *uap) 3350 { 3351 struct timeval tv[2]; 3352 struct nlookupdata nd; 3353 int error; 3354 3355 if (uap->tptr) { 3356 error = copyin(uap->tptr, tv, sizeof(tv)); 3357 if (error) 3358 return (error); 3359 } 3360 get_mplock(); 3361 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3362 if (error == 0) 3363 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3364 nlookup_done(&nd); 3365 rel_mplock(); 3366 return (error); 3367 } 3368 3369 /* 3370 * Set utimes on a file descriptor. The creds used to open the 3371 * file are used to determine whether the operation is allowed 3372 * or not. 3373 */ 3374 int 3375 kern_futimes(int fd, struct timeval *tptr) 3376 { 3377 struct thread *td = curthread; 3378 struct proc *p = td->td_proc; 3379 struct timespec ts[2]; 3380 struct file *fp; 3381 struct vnode *vp; 3382 struct vattr vattr; 3383 int error; 3384 3385 error = getutimes(tptr, ts); 3386 if (error) 3387 return (error); 3388 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3389 return (error); 3390 if (fp->f_nchandle.ncp) 3391 error = ncp_writechk(&fp->f_nchandle); 3392 if (error == 0) { 3393 vp = fp->f_data; 3394 error = vget(vp, LK_EXCLUSIVE); 3395 if (error == 0) { 3396 error = VOP_GETATTR(vp, &vattr); 3397 if (error == 0) { 3398 error = naccess_va(&vattr, NLC_OWN | NLC_WRITE, 3399 fp->f_cred); 3400 } 3401 if (error == 0) { 3402 error = setutimes(vp, &vattr, ts, 3403 (tptr == NULL)); 3404 } 3405 vput(vp); 3406 } 3407 } 3408 fdrop(fp); 3409 return (error); 3410 } 3411 3412 /* 3413 * futimes_args(int fd, struct timeval *tptr) 3414 * 3415 * Set the access and modification times of a file. 3416 * 3417 * MPALMOSTSAFE 3418 */ 3419 int 3420 sys_futimes(struct futimes_args *uap) 3421 { 3422 struct timeval tv[2]; 3423 int error; 3424 3425 if (uap->tptr) { 3426 error = copyin(uap->tptr, tv, sizeof(tv)); 3427 if (error) 3428 return (error); 3429 } 3430 get_mplock(); 3431 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL); 3432 rel_mplock(); 3433 3434 return (error); 3435 } 3436 3437 int 3438 kern_truncate(struct nlookupdata *nd, off_t length) 3439 { 3440 struct vnode *vp; 3441 struct vattr vattr; 3442 int error; 3443 3444 if (length < 0) 3445 return(EINVAL); 3446 nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE; 3447 if ((error = nlookup(nd)) != 0) 3448 return (error); 3449 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3450 return (error); 3451 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3452 return (error); 3453 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 3454 vrele(vp); 3455 return (error); 3456 } 3457 if (vp->v_type == VDIR) { 3458 error = EISDIR; 3459 } else if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3460 VATTR_NULL(&vattr); 3461 vattr.va_size = length; 3462 error = VOP_SETATTR(vp, &vattr, nd->nl_cred); 3463 } 3464 vput(vp); 3465 return (error); 3466 } 3467 3468 /* 3469 * truncate(char *path, int pad, off_t length) 3470 * 3471 * Truncate a file given its path name. 3472 * 3473 * MPALMOSTSAFE 3474 */ 3475 int 3476 sys_truncate(struct truncate_args *uap) 3477 { 3478 struct nlookupdata nd; 3479 int error; 3480 3481 get_mplock(); 3482 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3483 if (error == 0) 3484 error = kern_truncate(&nd, uap->length); 3485 nlookup_done(&nd); 3486 rel_mplock(); 3487 return error; 3488 } 3489 3490 int 3491 kern_ftruncate(int fd, off_t length) 3492 { 3493 struct thread *td = curthread; 3494 struct proc *p = td->td_proc; 3495 struct vattr vattr; 3496 struct vnode *vp; 3497 struct file *fp; 3498 int error; 3499 3500 if (length < 0) 3501 return(EINVAL); 3502 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3503 return (error); 3504 if (fp->f_nchandle.ncp) { 3505 error = ncp_writechk(&fp->f_nchandle); 3506 if (error) 3507 goto done; 3508 } 3509 if ((fp->f_flag & FWRITE) == 0) { 3510 error = EINVAL; 3511 goto done; 3512 } 3513 if (fp->f_flag & FAPPENDONLY) { /* inode was set s/uapnd */ 3514 error = EINVAL; 3515 goto done; 3516 } 3517 vp = (struct vnode *)fp->f_data; 3518 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3519 if (vp->v_type == VDIR) { 3520 error = EISDIR; 3521 } else if ((error = vn_writechk(vp, NULL)) == 0) { 3522 VATTR_NULL(&vattr); 3523 vattr.va_size = length; 3524 error = VOP_SETATTR(vp, &vattr, fp->f_cred); 3525 } 3526 vn_unlock(vp); 3527 done: 3528 fdrop(fp); 3529 return (error); 3530 } 3531 3532 /* 3533 * ftruncate_args(int fd, int pad, off_t length) 3534 * 3535 * Truncate a file given a file descriptor. 3536 * 3537 * MPALMOSTSAFE 3538 */ 3539 int 3540 sys_ftruncate(struct ftruncate_args *uap) 3541 { 3542 int error; 3543 3544 get_mplock(); 3545 error = kern_ftruncate(uap->fd, uap->length); 3546 rel_mplock(); 3547 3548 return (error); 3549 } 3550 3551 /* 3552 * fsync(int fd) 3553 * 3554 * Sync an open file. 3555 * 3556 * MPALMOSTSAFE 3557 */ 3558 int 3559 sys_fsync(struct fsync_args *uap) 3560 { 3561 struct thread *td = curthread; 3562 struct proc *p = td->td_proc; 3563 struct vnode *vp; 3564 struct file *fp; 3565 vm_object_t obj; 3566 int error; 3567 3568 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3569 return (error); 3570 get_mplock(); 3571 vp = (struct vnode *)fp->f_data; 3572 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3573 if ((obj = vp->v_object) != NULL) 3574 vm_object_page_clean(obj, 0, 0, 0); 3575 error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL); 3576 if (error == 0 && vp->v_mount) 3577 error = buf_fsync(vp); 3578 vn_unlock(vp); 3579 rel_mplock(); 3580 fdrop(fp); 3581 3582 return (error); 3583 } 3584 3585 int 3586 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond) 3587 { 3588 struct nchandle fnchd; 3589 struct nchandle tnchd; 3590 struct namecache *ncp; 3591 struct vnode *fdvp; 3592 struct vnode *tdvp; 3593 struct mount *mp; 3594 int error; 3595 3596 bwillinode(1); 3597 fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC; 3598 if ((error = nlookup(fromnd)) != 0) 3599 return (error); 3600 if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL) 3601 return (ENOENT); 3602 fnchd.mount = fromnd->nl_nch.mount; 3603 cache_hold(&fnchd); 3604 3605 /* 3606 * unlock the source nch so we can lookup the target nch without 3607 * deadlocking. The target may or may not exist so we do not check 3608 * for a target vp like kern_mkdir() and other creation functions do. 3609 * 3610 * The source and target directories are ref'd and rechecked after 3611 * everything is relocked to determine if the source or target file 3612 * has been renamed. 3613 */ 3614 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED); 3615 fromnd->nl_flags &= ~NLC_NCPISLOCKED; 3616 cache_unlock(&fromnd->nl_nch); 3617 3618 tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP; 3619 if ((error = nlookup(tond)) != 0) { 3620 cache_drop(&fnchd); 3621 return (error); 3622 } 3623 if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) { 3624 cache_drop(&fnchd); 3625 return (ENOENT); 3626 } 3627 tnchd.mount = tond->nl_nch.mount; 3628 cache_hold(&tnchd); 3629 3630 /* 3631 * If the source and target are the same there is nothing to do 3632 */ 3633 if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) { 3634 cache_drop(&fnchd); 3635 cache_drop(&tnchd); 3636 return (0); 3637 } 3638 3639 /* 3640 * Mount points cannot be renamed or overwritten 3641 */ 3642 if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) & 3643 NCF_ISMOUNTPT 3644 ) { 3645 cache_drop(&fnchd); 3646 cache_drop(&tnchd); 3647 return (EINVAL); 3648 } 3649 3650 /* 3651 * Relock the source ncp. cache_relock() will deal with any 3652 * deadlocks against the already-locked tond and will also 3653 * make sure both are resolved. 3654 * 3655 * NOTE AFTER RELOCKING: The source or target ncp may have become 3656 * invalid while they were unlocked, nc_vp and nc_mount could 3657 * be NULL. 3658 */ 3659 cache_relock(&fromnd->nl_nch, fromnd->nl_cred, 3660 &tond->nl_nch, tond->nl_cred); 3661 fromnd->nl_flags |= NLC_NCPISLOCKED; 3662 3663 /* 3664 * make sure the parent directories linkages are the same 3665 */ 3666 if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent || 3667 tnchd.ncp != tond->nl_nch.ncp->nc_parent) { 3668 cache_drop(&fnchd); 3669 cache_drop(&tnchd); 3670 return (ENOENT); 3671 } 3672 3673 /* 3674 * Both the source and target must be within the same filesystem and 3675 * in the same filesystem as their parent directories within the 3676 * namecache topology. 3677 * 3678 * NOTE: fromnd's nc_mount or nc_vp could be NULL. 3679 */ 3680 mp = fnchd.mount; 3681 if (mp != tnchd.mount || mp != fromnd->nl_nch.mount || 3682 mp != tond->nl_nch.mount) { 3683 cache_drop(&fnchd); 3684 cache_drop(&tnchd); 3685 return (EXDEV); 3686 } 3687 3688 /* 3689 * Make sure the mount point is writable 3690 */ 3691 if ((error = ncp_writechk(&tond->nl_nch)) != 0) { 3692 cache_drop(&fnchd); 3693 cache_drop(&tnchd); 3694 return (error); 3695 } 3696 3697 /* 3698 * If the target exists and either the source or target is a directory, 3699 * then both must be directories. 3700 * 3701 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might h 3702 * have become NULL. 3703 */ 3704 if (tond->nl_nch.ncp->nc_vp) { 3705 if (fromnd->nl_nch.ncp->nc_vp == NULL) { 3706 error = ENOENT; 3707 } else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) { 3708 if (tond->nl_nch.ncp->nc_vp->v_type != VDIR) 3709 error = ENOTDIR; 3710 } else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) { 3711 error = EISDIR; 3712 } 3713 } 3714 3715 /* 3716 * You cannot rename a source into itself or a subdirectory of itself. 3717 * We check this by travsersing the target directory upwards looking 3718 * for a match against the source. 3719 * 3720 * XXX MPSAFE 3721 */ 3722 if (error == 0) { 3723 for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) { 3724 if (fromnd->nl_nch.ncp == ncp) { 3725 error = EINVAL; 3726 break; 3727 } 3728 } 3729 } 3730 3731 cache_drop(&fnchd); 3732 cache_drop(&tnchd); 3733 3734 /* 3735 * Even though the namespaces are different, they may still represent 3736 * hardlinks to the same file. The filesystem might have a hard time 3737 * with this so we issue a NREMOVE of the source instead of a NRENAME 3738 * when we detect the situation. 3739 */ 3740 if (error == 0) { 3741 fdvp = fromnd->nl_dvp; 3742 tdvp = tond->nl_dvp; 3743 if (fdvp == NULL || tdvp == NULL) { 3744 error = EPERM; 3745 } else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) { 3746 error = VOP_NREMOVE(&fromnd->nl_nch, fdvp, 3747 fromnd->nl_cred); 3748 } else { 3749 error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch, 3750 fdvp, tdvp, tond->nl_cred); 3751 } 3752 } 3753 return (error); 3754 } 3755 3756 /* 3757 * rename_args(char *from, char *to) 3758 * 3759 * Rename files. Source and destination must either both be directories, 3760 * or both not be directories. If target is a directory, it must be empty. 3761 * 3762 * MPALMOSTSAFE 3763 */ 3764 int 3765 sys_rename(struct rename_args *uap) 3766 { 3767 struct nlookupdata fromnd, tond; 3768 int error; 3769 3770 get_mplock(); 3771 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0); 3772 if (error == 0) { 3773 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0); 3774 if (error == 0) 3775 error = kern_rename(&fromnd, &tond); 3776 nlookup_done(&tond); 3777 } 3778 nlookup_done(&fromnd); 3779 rel_mplock(); 3780 return (error); 3781 } 3782 3783 int 3784 kern_mkdir(struct nlookupdata *nd, int mode) 3785 { 3786 struct thread *td = curthread; 3787 struct proc *p = td->td_proc; 3788 struct vnode *vp; 3789 struct vattr vattr; 3790 int error; 3791 3792 bwillinode(1); 3793 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP; 3794 if ((error = nlookup(nd)) != 0) 3795 return (error); 3796 3797 if (nd->nl_nch.ncp->nc_vp) 3798 return (EEXIST); 3799 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3800 return (error); 3801 VATTR_NULL(&vattr); 3802 vattr.va_type = VDIR; 3803 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; 3804 3805 vp = NULL; 3806 error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, td->td_ucred, &vattr); 3807 if (error == 0) 3808 vput(vp); 3809 return (error); 3810 } 3811 3812 /* 3813 * mkdir_args(char *path, int mode) 3814 * 3815 * Make a directory file. 3816 * 3817 * MPALMOSTSAFE 3818 */ 3819 int 3820 sys_mkdir(struct mkdir_args *uap) 3821 { 3822 struct nlookupdata nd; 3823 int error; 3824 3825 get_mplock(); 3826 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3827 if (error == 0) 3828 error = kern_mkdir(&nd, uap->mode); 3829 nlookup_done(&nd); 3830 rel_mplock(); 3831 return (error); 3832 } 3833 3834 int 3835 kern_rmdir(struct nlookupdata *nd) 3836 { 3837 int error; 3838 3839 bwillinode(1); 3840 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 3841 if ((error = nlookup(nd)) != 0) 3842 return (error); 3843 3844 /* 3845 * Do not allow directories representing mount points to be 3846 * deleted, even if empty. Check write perms on mount point 3847 * in case the vnode is aliased (aka nullfs). 3848 */ 3849 if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT)) 3850 return (EINVAL); 3851 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3852 return (error); 3853 error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 3854 return (error); 3855 } 3856 3857 /* 3858 * rmdir_args(char *path) 3859 * 3860 * Remove a directory file. 3861 * 3862 * MPALMOSTSAFE 3863 */ 3864 int 3865 sys_rmdir(struct rmdir_args *uap) 3866 { 3867 struct nlookupdata nd; 3868 int error; 3869 3870 get_mplock(); 3871 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3872 if (error == 0) 3873 error = kern_rmdir(&nd); 3874 nlookup_done(&nd); 3875 rel_mplock(); 3876 return (error); 3877 } 3878 3879 int 3880 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res, 3881 enum uio_seg direction) 3882 { 3883 struct thread *td = curthread; 3884 struct proc *p = td->td_proc; 3885 struct vnode *vp; 3886 struct file *fp; 3887 struct uio auio; 3888 struct iovec aiov; 3889 off_t loff; 3890 int error, eofflag; 3891 3892 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3893 return (error); 3894 if ((fp->f_flag & FREAD) == 0) { 3895 error = EBADF; 3896 goto done; 3897 } 3898 vp = (struct vnode *)fp->f_data; 3899 unionread: 3900 if (vp->v_type != VDIR) { 3901 error = EINVAL; 3902 goto done; 3903 } 3904 aiov.iov_base = buf; 3905 aiov.iov_len = count; 3906 auio.uio_iov = &aiov; 3907 auio.uio_iovcnt = 1; 3908 auio.uio_rw = UIO_READ; 3909 auio.uio_segflg = direction; 3910 auio.uio_td = td; 3911 auio.uio_resid = count; 3912 loff = auio.uio_offset = fp->f_offset; 3913 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL); 3914 fp->f_offset = auio.uio_offset; 3915 if (error) 3916 goto done; 3917 if (count == auio.uio_resid) { 3918 if (union_dircheckp) { 3919 error = union_dircheckp(td, &vp, fp); 3920 if (error == -1) 3921 goto unionread; 3922 if (error) 3923 goto done; 3924 } 3925 #if 0 3926 if ((vp->v_flag & VROOT) && 3927 (vp->v_mount->mnt_flag & MNT_UNION)) { 3928 struct vnode *tvp = vp; 3929 vp = vp->v_mount->mnt_vnodecovered; 3930 vref(vp); 3931 fp->f_data = vp; 3932 fp->f_offset = 0; 3933 vrele(tvp); 3934 goto unionread; 3935 } 3936 #endif 3937 } 3938 3939 /* 3940 * WARNING! *basep may not be wide enough to accomodate the 3941 * seek offset. XXX should we hack this to return the upper 32 bits 3942 * for offsets greater then 4G? 3943 */ 3944 if (basep) { 3945 *basep = (long)loff; 3946 } 3947 *res = count - auio.uio_resid; 3948 done: 3949 fdrop(fp); 3950 return (error); 3951 } 3952 3953 /* 3954 * getdirentries_args(int fd, char *buf, u_int conut, long *basep) 3955 * 3956 * Read a block of directory entries in a file system independent format. 3957 * 3958 * MPALMOSTSAFE 3959 */ 3960 int 3961 sys_getdirentries(struct getdirentries_args *uap) 3962 { 3963 long base; 3964 int error; 3965 3966 get_mplock(); 3967 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base, 3968 &uap->sysmsg_result, UIO_USERSPACE); 3969 rel_mplock(); 3970 3971 if (error == 0 && uap->basep) 3972 error = copyout(&base, uap->basep, sizeof(*uap->basep)); 3973 return (error); 3974 } 3975 3976 /* 3977 * getdents_args(int fd, char *buf, size_t count) 3978 * 3979 * MPALMOSTSAFE 3980 */ 3981 int 3982 sys_getdents(struct getdents_args *uap) 3983 { 3984 int error; 3985 3986 get_mplock(); 3987 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL, 3988 &uap->sysmsg_result, UIO_USERSPACE); 3989 rel_mplock(); 3990 3991 return (error); 3992 } 3993 3994 /* 3995 * Set the mode mask for creation of filesystem nodes. 3996 * 3997 * umask(int newmask) 3998 * 3999 * MPSAFE 4000 */ 4001 int 4002 sys_umask(struct umask_args *uap) 4003 { 4004 struct thread *td = curthread; 4005 struct proc *p = td->td_proc; 4006 struct filedesc *fdp; 4007 4008 fdp = p->p_fd; 4009 uap->sysmsg_result = fdp->fd_cmask; 4010 fdp->fd_cmask = uap->newmask & ALLPERMS; 4011 return (0); 4012 } 4013 4014 /* 4015 * revoke(char *path) 4016 * 4017 * Void all references to file by ripping underlying filesystem 4018 * away from vnode. 4019 * 4020 * MPALMOSTSAFE 4021 */ 4022 int 4023 sys_revoke(struct revoke_args *uap) 4024 { 4025 struct nlookupdata nd; 4026 struct vattr vattr; 4027 struct vnode *vp; 4028 struct ucred *cred; 4029 int error; 4030 4031 vp = NULL; 4032 get_mplock(); 4033 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4034 if (error == 0) 4035 error = nlookup(&nd); 4036 if (error == 0) 4037 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 4038 cred = crhold(nd.nl_cred); 4039 nlookup_done(&nd); 4040 if (error == 0) { 4041 if (error == 0) 4042 error = VOP_GETATTR(vp, &vattr); 4043 if (error == 0 && cred->cr_uid != vattr.va_uid) 4044 error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0); 4045 if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) { 4046 if (vcount(vp) > 0) 4047 error = vrevoke(vp, cred); 4048 } else if (error == 0) { 4049 error = vrevoke(vp, cred); 4050 } 4051 vrele(vp); 4052 } 4053 if (cred) 4054 crfree(cred); 4055 rel_mplock(); 4056 return (error); 4057 } 4058 4059 /* 4060 * getfh_args(char *fname, fhandle_t *fhp) 4061 * 4062 * Get (NFS) file handle 4063 * 4064 * NOTE: We use the fsid of the covering mount, even if it is a nullfs 4065 * mount. This allows nullfs mounts to be explicitly exported. 4066 * 4067 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe. 4068 * 4069 * nullfs mounts of subdirectories are not safe. That is, it will 4070 * work, but you do not really have protection against access to 4071 * the related parent directories. 4072 * 4073 * MPALMOSTSAFE 4074 */ 4075 int 4076 sys_getfh(struct getfh_args *uap) 4077 { 4078 struct thread *td = curthread; 4079 struct nlookupdata nd; 4080 fhandle_t fh; 4081 struct vnode *vp; 4082 struct mount *mp; 4083 int error; 4084 4085 /* 4086 * Must be super user 4087 */ 4088 if ((error = priv_check(td, PRIV_ROOT)) != 0) 4089 return (error); 4090 4091 vp = NULL; 4092 get_mplock(); 4093 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW); 4094 if (error == 0) 4095 error = nlookup(&nd); 4096 if (error == 0) 4097 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4098 mp = nd.nl_nch.mount; 4099 nlookup_done(&nd); 4100 if (error == 0) { 4101 bzero(&fh, sizeof(fh)); 4102 fh.fh_fsid = mp->mnt_stat.f_fsid; 4103 error = VFS_VPTOFH(vp, &fh.fh_fid); 4104 vput(vp); 4105 if (error == 0) 4106 error = copyout(&fh, uap->fhp, sizeof(fh)); 4107 } 4108 rel_mplock(); 4109 return (error); 4110 } 4111 4112 /* 4113 * fhopen_args(const struct fhandle *u_fhp, int flags) 4114 * 4115 * syscall for the rpc.lockd to use to translate a NFS file handle into 4116 * an open descriptor. 4117 * 4118 * warning: do not remove the priv_check() call or this becomes one giant 4119 * security hole. 4120 * 4121 * MPALMOSTSAFE 4122 */ 4123 int 4124 sys_fhopen(struct fhopen_args *uap) 4125 { 4126 struct thread *td = curthread; 4127 struct filedesc *fdp = td->td_proc->p_fd; 4128 struct mount *mp; 4129 struct vnode *vp; 4130 struct fhandle fhp; 4131 struct vattr vat; 4132 struct vattr *vap = &vat; 4133 struct flock lf; 4134 int fmode, mode, error, type; 4135 struct file *nfp; 4136 struct file *fp; 4137 int indx; 4138 4139 /* 4140 * Must be super user 4141 */ 4142 error = priv_check(td, PRIV_ROOT); 4143 if (error) 4144 return (error); 4145 4146 fmode = FFLAGS(uap->flags); 4147 4148 /* 4149 * Why not allow a non-read/write open for our lockd? 4150 */ 4151 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) 4152 return (EINVAL); 4153 error = copyin(uap->u_fhp, &fhp, sizeof(fhp)); 4154 if (error) 4155 return(error); 4156 4157 /* 4158 * Find the mount point 4159 */ 4160 get_mplock(); 4161 mp = vfs_getvfs(&fhp.fh_fsid); 4162 if (mp == NULL) { 4163 error = ESTALE; 4164 goto done; 4165 } 4166 /* now give me my vnode, it gets returned to me locked */ 4167 error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp); 4168 if (error) 4169 goto done; 4170 /* 4171 * from now on we have to make sure not 4172 * to forget about the vnode 4173 * any error that causes an abort must vput(vp) 4174 * just set error = err and 'goto bad;'. 4175 */ 4176 4177 /* 4178 * from vn_open 4179 */ 4180 if (vp->v_type == VLNK) { 4181 error = EMLINK; 4182 goto bad; 4183 } 4184 if (vp->v_type == VSOCK) { 4185 error = EOPNOTSUPP; 4186 goto bad; 4187 } 4188 mode = 0; 4189 if (fmode & (FWRITE | O_TRUNC)) { 4190 if (vp->v_type == VDIR) { 4191 error = EISDIR; 4192 goto bad; 4193 } 4194 error = vn_writechk(vp, NULL); 4195 if (error) 4196 goto bad; 4197 mode |= VWRITE; 4198 } 4199 if (fmode & FREAD) 4200 mode |= VREAD; 4201 if (mode) { 4202 error = VOP_ACCESS(vp, mode, td->td_ucred); 4203 if (error) 4204 goto bad; 4205 } 4206 if (fmode & O_TRUNC) { 4207 vn_unlock(vp); /* XXX */ 4208 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 4209 VATTR_NULL(vap); 4210 vap->va_size = 0; 4211 error = VOP_SETATTR(vp, vap, td->td_ucred); 4212 if (error) 4213 goto bad; 4214 } 4215 4216 /* 4217 * VOP_OPEN needs the file pointer so it can potentially override 4218 * it. 4219 * 4220 * WARNING! no f_nchandle will be associated when fhopen()ing a 4221 * directory. XXX 4222 */ 4223 if ((error = falloc(td->td_lwp, &nfp, &indx)) != 0) 4224 goto bad; 4225 fp = nfp; 4226 4227 error = VOP_OPEN(vp, fmode, td->td_ucred, fp); 4228 if (error) { 4229 /* 4230 * setting f_ops this way prevents VOP_CLOSE from being 4231 * called or fdrop() releasing the vp from v_data. Since 4232 * the VOP_OPEN failed we don't want to VOP_CLOSE. 4233 */ 4234 fp->f_ops = &badfileops; 4235 fp->f_data = NULL; 4236 goto bad_drop; 4237 } 4238 4239 /* 4240 * The fp is given its own reference, we still have our ref and lock. 4241 * 4242 * Assert that all regular files must be created with a VM object. 4243 */ 4244 if (vp->v_type == VREG && vp->v_object == NULL) { 4245 kprintf("fhopen: regular file did not have VM object: %p\n", vp); 4246 goto bad_drop; 4247 } 4248 4249 /* 4250 * The open was successful. Handle any locking requirements. 4251 */ 4252 if (fmode & (O_EXLOCK | O_SHLOCK)) { 4253 lf.l_whence = SEEK_SET; 4254 lf.l_start = 0; 4255 lf.l_len = 0; 4256 if (fmode & O_EXLOCK) 4257 lf.l_type = F_WRLCK; 4258 else 4259 lf.l_type = F_RDLCK; 4260 if (fmode & FNONBLOCK) 4261 type = 0; 4262 else 4263 type = F_WAIT; 4264 vn_unlock(vp); 4265 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 4266 /* 4267 * release our private reference. 4268 */ 4269 fsetfd(fdp, NULL, indx); 4270 fdrop(fp); 4271 vrele(vp); 4272 goto done; 4273 } 4274 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4275 fp->f_flag |= FHASLOCK; 4276 } 4277 4278 /* 4279 * Clean up. Associate the file pointer with the previously 4280 * reserved descriptor and return it. 4281 */ 4282 vput(vp); 4283 rel_mplock(); 4284 fsetfd(fdp, fp, indx); 4285 fdrop(fp); 4286 uap->sysmsg_result = indx; 4287 return (0); 4288 4289 bad_drop: 4290 fsetfd(fdp, NULL, indx); 4291 fdrop(fp); 4292 bad: 4293 vput(vp); 4294 done: 4295 rel_mplock(); 4296 return (error); 4297 } 4298 4299 /* 4300 * fhstat_args(struct fhandle *u_fhp, struct stat *sb) 4301 * 4302 * MPALMOSTSAFE 4303 */ 4304 int 4305 sys_fhstat(struct fhstat_args *uap) 4306 { 4307 struct thread *td = curthread; 4308 struct stat sb; 4309 fhandle_t fh; 4310 struct mount *mp; 4311 struct vnode *vp; 4312 int error; 4313 4314 /* 4315 * Must be super user 4316 */ 4317 error = priv_check(td, PRIV_ROOT); 4318 if (error) 4319 return (error); 4320 4321 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); 4322 if (error) 4323 return (error); 4324 4325 get_mplock(); 4326 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) 4327 error = ESTALE; 4328 if (error == 0) { 4329 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) == 0) { 4330 error = vn_stat(vp, &sb, td->td_ucred); 4331 vput(vp); 4332 } 4333 } 4334 rel_mplock(); 4335 if (error == 0) 4336 error = copyout(&sb, uap->sb, sizeof(sb)); 4337 return (error); 4338 } 4339 4340 /* 4341 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf) 4342 * 4343 * MPALMOSTSAFE 4344 */ 4345 int 4346 sys_fhstatfs(struct fhstatfs_args *uap) 4347 { 4348 struct thread *td = curthread; 4349 struct proc *p = td->td_proc; 4350 struct statfs *sp; 4351 struct mount *mp; 4352 struct vnode *vp; 4353 struct statfs sb; 4354 char *fullpath, *freepath; 4355 fhandle_t fh; 4356 int error; 4357 4358 /* 4359 * Must be super user 4360 */ 4361 if ((error = priv_check(td, PRIV_ROOT))) 4362 return (error); 4363 4364 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4365 return (error); 4366 4367 get_mplock(); 4368 4369 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4370 error = ESTALE; 4371 goto done; 4372 } 4373 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4374 error = ESTALE; 4375 goto done; 4376 } 4377 4378 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) != 0) 4379 goto done; 4380 mp = vp->v_mount; 4381 sp = &mp->mnt_stat; 4382 vput(vp); 4383 if ((error = VFS_STATFS(mp, sp, td->td_ucred)) != 0) 4384 goto done; 4385 4386 error = mount_path(p, mp, &fullpath, &freepath); 4387 if (error) 4388 goto done; 4389 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 4390 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 4391 kfree(freepath, M_TEMP); 4392 4393 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 4394 if (priv_check(td, PRIV_ROOT)) { 4395 bcopy(sp, &sb, sizeof(sb)); 4396 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0; 4397 sp = &sb; 4398 } 4399 error = copyout(sp, uap->buf, sizeof(*sp)); 4400 done: 4401 rel_mplock(); 4402 return (error); 4403 } 4404 4405 /* 4406 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf) 4407 * 4408 * MPALMOSTSAFE 4409 */ 4410 int 4411 sys_fhstatvfs(struct fhstatvfs_args *uap) 4412 { 4413 struct thread *td = curthread; 4414 struct proc *p = td->td_proc; 4415 struct statvfs *sp; 4416 struct mount *mp; 4417 struct vnode *vp; 4418 fhandle_t fh; 4419 int error; 4420 4421 /* 4422 * Must be super user 4423 */ 4424 if ((error = priv_check(td, PRIV_ROOT))) 4425 return (error); 4426 4427 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4428 return (error); 4429 4430 get_mplock(); 4431 4432 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4433 error = ESTALE; 4434 goto done; 4435 } 4436 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4437 error = ESTALE; 4438 goto done; 4439 } 4440 4441 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp))) 4442 goto done; 4443 mp = vp->v_mount; 4444 sp = &mp->mnt_vstat; 4445 vput(vp); 4446 if ((error = VFS_STATVFS(mp, sp, td->td_ucred)) != 0) 4447 goto done; 4448 4449 sp->f_flag = 0; 4450 if (mp->mnt_flag & MNT_RDONLY) 4451 sp->f_flag |= ST_RDONLY; 4452 if (mp->mnt_flag & MNT_NOSUID) 4453 sp->f_flag |= ST_NOSUID; 4454 error = copyout(sp, uap->buf, sizeof(*sp)); 4455 done: 4456 rel_mplock(); 4457 return (error); 4458 } 4459 4460 4461 /* 4462 * Syscall to push extended attribute configuration information into the 4463 * VFS. Accepts a path, which it converts to a mountpoint, as well as 4464 * a command (int cmd), and attribute name and misc data. For now, the 4465 * attribute name is left in userspace for consumption by the VFS_op. 4466 * It will probably be changed to be copied into sysspace by the 4467 * syscall in the future, once issues with various consumers of the 4468 * attribute code have raised their hands. 4469 * 4470 * Currently this is used only by UFS Extended Attributes. 4471 * 4472 * MPALMOSTSAFE 4473 */ 4474 int 4475 sys_extattrctl(struct extattrctl_args *uap) 4476 { 4477 struct nlookupdata nd; 4478 struct vnode *vp; 4479 char attrname[EXTATTR_MAXNAMELEN]; 4480 int error; 4481 size_t size; 4482 4483 get_mplock(); 4484 4485 attrname[0] = 0; 4486 vp = NULL; 4487 error = 0; 4488 4489 if (error == 0 && uap->filename) { 4490 error = nlookup_init(&nd, uap->filename, UIO_USERSPACE, 4491 NLC_FOLLOW); 4492 if (error == 0) 4493 error = nlookup(&nd); 4494 if (error == 0) 4495 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 4496 nlookup_done(&nd); 4497 } 4498 4499 if (error == 0 && uap->attrname) { 4500 error = copyinstr(uap->attrname, attrname, EXTATTR_MAXNAMELEN, 4501 &size); 4502 } 4503 4504 if (error == 0) { 4505 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4506 if (error == 0) 4507 error = nlookup(&nd); 4508 if (error == 0) 4509 error = ncp_writechk(&nd.nl_nch); 4510 if (error == 0) { 4511 error = VFS_EXTATTRCTL(nd.nl_nch.mount, uap->cmd, vp, 4512 uap->attrnamespace, 4513 uap->attrname, nd.nl_cred); 4514 } 4515 nlookup_done(&nd); 4516 } 4517 4518 rel_mplock(); 4519 4520 return (error); 4521 } 4522 4523 /* 4524 * Syscall to get a named extended attribute on a file or directory. 4525 * 4526 * MPALMOSTSAFE 4527 */ 4528 int 4529 sys_extattr_set_file(struct extattr_set_file_args *uap) 4530 { 4531 char attrname[EXTATTR_MAXNAMELEN]; 4532 struct nlookupdata nd; 4533 struct vnode *vp; 4534 struct uio auio; 4535 struct iovec aiov; 4536 int error; 4537 4538 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN); 4539 if (error) 4540 return (error); 4541 4542 vp = NULL; 4543 get_mplock(); 4544 4545 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4546 if (error == 0) 4547 error = nlookup(&nd); 4548 if (error == 0) 4549 error = ncp_writechk(&nd.nl_nch); 4550 if (error == 0) 4551 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4552 if (error) { 4553 nlookup_done(&nd); 4554 rel_mplock(); 4555 return (error); 4556 } 4557 4558 bzero(&auio, sizeof(auio)); 4559 aiov.iov_base = uap->data; 4560 aiov.iov_len = uap->nbytes; 4561 auio.uio_iov = &aiov; 4562 auio.uio_iovcnt = 1; 4563 auio.uio_offset = 0; 4564 auio.uio_resid = uap->nbytes; 4565 auio.uio_rw = UIO_WRITE; 4566 auio.uio_td = curthread; 4567 4568 error = VOP_SETEXTATTR(vp, uap->attrnamespace, attrname, 4569 &auio, nd.nl_cred); 4570 4571 vput(vp); 4572 nlookup_done(&nd); 4573 rel_mplock(); 4574 return (error); 4575 } 4576 4577 /* 4578 * Syscall to get a named extended attribute on a file or directory. 4579 * 4580 * MPALMOSTSAFE 4581 */ 4582 int 4583 sys_extattr_get_file(struct extattr_get_file_args *uap) 4584 { 4585 char attrname[EXTATTR_MAXNAMELEN]; 4586 struct nlookupdata nd; 4587 struct uio auio; 4588 struct iovec aiov; 4589 struct vnode *vp; 4590 int error; 4591 4592 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN); 4593 if (error) 4594 return (error); 4595 4596 vp = NULL; 4597 get_mplock(); 4598 4599 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4600 if (error == 0) 4601 error = nlookup(&nd); 4602 if (error == 0) 4603 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4604 if (error) { 4605 nlookup_done(&nd); 4606 rel_mplock(); 4607 return (error); 4608 } 4609 4610 bzero(&auio, sizeof(auio)); 4611 aiov.iov_base = uap->data; 4612 aiov.iov_len = uap->nbytes; 4613 auio.uio_iov = &aiov; 4614 auio.uio_iovcnt = 1; 4615 auio.uio_offset = 0; 4616 auio.uio_resid = uap->nbytes; 4617 auio.uio_rw = UIO_READ; 4618 auio.uio_td = curthread; 4619 4620 error = VOP_GETEXTATTR(vp, uap->attrnamespace, attrname, 4621 &auio, nd.nl_cred); 4622 uap->sysmsg_result = uap->nbytes - auio.uio_resid; 4623 4624 vput(vp); 4625 nlookup_done(&nd); 4626 rel_mplock(); 4627 return(error); 4628 } 4629 4630 /* 4631 * Syscall to delete a named extended attribute from a file or directory. 4632 * Accepts attribute name. The real work happens in VOP_SETEXTATTR(). 4633 * 4634 * MPALMOSTSAFE 4635 */ 4636 int 4637 sys_extattr_delete_file(struct extattr_delete_file_args *uap) 4638 { 4639 char attrname[EXTATTR_MAXNAMELEN]; 4640 struct nlookupdata nd; 4641 struct vnode *vp; 4642 int error; 4643 4644 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN); 4645 if (error) 4646 return(error); 4647 4648 get_mplock(); 4649 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4650 if (error == 0) 4651 error = nlookup(&nd); 4652 if (error == 0) 4653 error = ncp_writechk(&nd.nl_nch); 4654 if (error == 0) { 4655 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4656 if (error == 0) { 4657 error = VOP_SETEXTATTR(vp, uap->attrnamespace, 4658 attrname, NULL, nd.nl_cred); 4659 vput(vp); 4660 } 4661 } 4662 nlookup_done(&nd); 4663 rel_mplock(); 4664 return(error); 4665 } 4666 4667 /* 4668 * Determine if the mount is visible to the process. 4669 */ 4670 static int 4671 chroot_visible_mnt(struct mount *mp, struct proc *p) 4672 { 4673 struct nchandle nch; 4674 4675 /* 4676 * Traverse from the mount point upwards. If we hit the process 4677 * root then the mount point is visible to the process. 4678 */ 4679 nch = mp->mnt_ncmountpt; 4680 while (nch.ncp) { 4681 if (nch.mount == p->p_fd->fd_nrdir.mount && 4682 nch.ncp == p->p_fd->fd_nrdir.ncp) { 4683 return(1); 4684 } 4685 if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) { 4686 nch = nch.mount->mnt_ncmounton; 4687 } else { 4688 nch.ncp = nch.ncp->nc_parent; 4689 } 4690 } 4691 4692 /* 4693 * If the mount point is not visible to the process, but the 4694 * process root is in a subdirectory of the mount, return 4695 * TRUE anyway. 4696 */ 4697 if (p->p_fd->fd_nrdir.mount == mp) 4698 return(1); 4699 4700 return(0); 4701 } 4702 4703