/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_vnops.c	7.14 (Berkeley) 02/08/90
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "file.h"
#include "stat.h"
#include "buf.h"
#include "proc.h"
#include "uio.h"
#include "socket.h"
#include "socketvar.h"
#include "mount.h"
#include "vnode.h"
#include "../ufs/fs.h"
#include "../ufs/quota.h"
#include "ioctl.h"
#include "tty.h"

int	vn_read(), vn_write(), vn_ioctl(), vn_select(), vn_close();
struct	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_select, vn_close };

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
vn_open(ndp, fmode, cmode)
	register struct nameidata *ndp;
	int fmode, cmode;
{
	register struct vnode *vp;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	if (fmode & FCREAT) {
		ndp->ni_nameiop = CREATE | LOCKPARENT | LOCKLEAF;
		if ((fmode & FEXCL) == 0)
			ndp->ni_nameiop |= FOLLOW;
		if (error = namei(ndp))
			return (error);
		if (ndp->ni_vp == NULL) {
			vattr_null(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (error = VOP_CREATE(ndp, vap))
				return (error);
			fmode &= ~FTRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else if (ndp->ni_dvp != NULL)
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & FEXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~FCREAT;
		}
	} else {
		ndp->ni_nameiop = LOOKUP | FOLLOW | LOCKLEAF;
		if (error = namei(ndp))
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & FCREAT) == 0) {
		if (fmode & FREAD) {
			if (error = VOP_ACCESS(vp, VREAD, ndp->ni_cred))
				goto bad;
		}
		if (fmode & (FWRITE|FTRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) ||
			    (error = VOP_ACCESS(vp, VWRITE, ndp->ni_cred)))
				goto bad;
		}
	}
	if (fmode & FTRUNC) {
		vattr_null(vap);
		vap->va_size = 0;
		if (error = VOP_SETATTR(vp, vap, ndp->ni_cred))
			goto bad;
	}
	VOP_UNLOCK(vp);
	if (setjmp(&u.u_qsave)) {
		if (error == 0)
			error = EINTR;
		return (error);
	}
	error = VOP_OPEN(vp, fmode, ndp->ni_cred);
	if (error)
		vrele(vp);
	return (error);

bad:
	vput(vp);
	return (error);
}
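
/*
 * Illustrative sketch of an assumed caller: after a successful
 * vn_open(), the open(2)/creat(2) path is expected to wire the
 * returned vnode into its file table entry so that later reads,
 * writes, ioctls, selects, and closes dispatch through the vnops
 * table declared above.  The FMASK, DTYPE_VNODE, and f_type names
 * are assumptions about the surrounding kernel headers.
 *
 *	if (error = vn_open(ndp, fmode, cmode))
 *		... release the file entry and return (error) ...
 *	fp->f_flag = fmode & FMASK;
 *	fp->f_type = DTYPE_VNODE;
 *	fp->f_ops = &vnops;
 *	fp->f_data = (caddr_t)ndp->ni_vp;
 */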

/*
 * Check for write permissions on the specified vnode.
 * The read-only status of the file system is checked.
 * Also, prototype text segments cannot be written.
 */
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket or a block or character
	 * device resident on the file system.
	 */
	if ((vp->v_mount->m_flag & M_RDONLY) && vp->v_type != VCHR &&
	    vp->v_type != VBLK && vp->v_type != VSOCK)
		return (EROFS);
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		xrele(vp);
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode version of rdwri() for calls on file systems.
 */
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_LOCK(vp);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	if (rw == UIO_READ)
		error = VOP_READ(vp, &auio, ioflg, cred);
	else
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);
	return (error);
}
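
/*
 * Illustrative sketch of a typical vn_rdwr() call: read a fixed-size
 * header from the front of a file into a kernel buffer, with the
 * vnode not already locked by the caller (ioflg of 0), and treat a
 * short read as an error.  The hdr and resid variables are
 * hypothetical.
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)&hdr, sizeof (hdr),
 *	    (off_t)0, UIO_SYSSPACE, 0, cred, &resid);
 *	if (error == 0 && resid > 0)
 *		error = EIO;
 */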

vn_read(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	register struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error;

	VOP_LOCK(vp);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, (fp->f_flag & FNDELAY) ? IO_NDELAY : 0, cred);
	fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

vn_write(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	register struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag = 0;

	if (vp->v_type == VREG && (fp->f_flag & FAPPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNDELAY)
		ioflag |= IO_NDELAY;
	VOP_LOCK(vp);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (ioflag & IO_APPEND)
		fp->f_offset = uio->uio_offset;
	else
		fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * Get stat info for a vnode.
 */
vn_stat(vp, sb)
	struct vnode *vp;
	register struct stat *sb;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, u.u_cred);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = vap->va_fsid;
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	default:
		return (EBADF);
	};
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atime = vap->va_atime.tv_sec;
	sb->st_spare1 = 0;
	sb->st_mtime = vap->va_mtime.tv_sec;
	sb->st_spare2 = 0;
	sb->st_ctime = vap->va_ctime.tv_sec;
	sb->st_spare3 = 0;
	sb->st_blksize = vap->va_blocksize;
	sb->st_flags = vap->va_flags;
	sb->st_gen = vap->va_gen;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * Vnode ioctl call
 */
vn_ioctl(fp, com, data)
	struct file *fp;
	int com;
	caddr_t data;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if (error = VOP_GETATTR(vp, &vattr, u.u_cred))
				return (error);
			*(off_t *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		return (ENOTTY);

	case VCHR:
	case VBLK:
		u.u_r.r_val1 = 0;
		if (setjmp(&u.u_qsave)) {
			if ((u.u_sigintr & sigmask(u.u_procp->p_cursig)) != 0)
				return (EINTR);
			u.u_eosys = RESTARTSYS;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, u.u_cred);
		if (error == 0 && com == TIOCSCTTY) {
			u.u_procp->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * Vnode select call
 */
vn_select(fp, which)
	struct file *fp;
	int which;
{
	return (VOP_SELECT(((struct vnode *)fp->f_data), which, u.u_cred));
}

/*
 * Vnode close call
 */
vn_close(fp)
	register struct file *fp;
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	int error;

	if (fp->f_flag & (FSHLOCK|FEXLOCK))
		vn_unlock(fp, FSHLOCK|FEXLOCK);
	/*
	 * Must delete vnode reference from this file entry
	 * before VOP_CLOSE, so that only other references
	 * will prevent close.
	 */
	fp->f_data = (caddr_t) 0;
	error = VOP_CLOSE(vp, fp->f_flag, u.u_cred);
	vrele(vp);
	return (error);
}
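
/*
 * Illustrative sketch of how the advisory locks below are assumed to
 * be driven: flock(2) translates its request into the LOCK_SH,
 * LOCK_EX, LOCK_NB, and LOCK_UN bits that vn_lock() and vn_unlock()
 * expect, roughly as follows (the "how" argument is hypothetical).
 *
 *	if (how & LOCK_UN) {
 *		vn_unlock(fp, FSHLOCK|FEXLOCK);
 *		return (0);
 *	}
 *	return (vn_lock(fp, how));
 */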

/*
 * Place an advisory lock on a vnode.
 * !! THIS IMPLIES THAT ALL STATEFUL FILE SERVERS WILL USE file table entries
 */
vn_lock(fp, cmd)
	register struct file *fp;
	int cmd;
{
	register int priority = PLOCK;
	register struct vnode *vp = (struct vnode *)fp->f_data;

	if ((cmd & LOCK_EX) == 0)
		priority += 4;
	if (setjmp(&u.u_qsave)) {
		if ((u.u_sigintr & sigmask(u.u_procp->p_cursig)) != 0)
			return (EINTR);
		u.u_eosys = RESTARTSYS;
		return (0);
	}
	/*
	 * If there's an exclusive lock currently applied
	 * to the file, then we've gotta wait for the
	 * lock with everyone else.
	 */
again:
	while (vp->v_flag & VEXLOCK) {
		/*
		 * If we're holding an exclusive
		 * lock, then release it.
		 */
		if (fp->f_flag & FEXLOCK) {
			vn_unlock(fp, FEXLOCK);
			continue;
		}
		if (cmd & LOCK_NB)
			return (EWOULDBLOCK);
		vp->v_flag |= VLWAIT;
		tsleep((caddr_t)&vp->v_exlockc, priority, SLP_EXLCK, 0);
	}
	if ((cmd & LOCK_EX) && (vp->v_flag & VSHLOCK)) {
		/*
		 * Must wait for any shared locks to finish
		 * before we try to apply an exclusive lock.
		 *
		 * If we're holding a shared
		 * lock, then release it.
		 */
		if (fp->f_flag & FSHLOCK) {
			vn_unlock(fp, FSHLOCK);
			goto again;
		}
		if (cmd & LOCK_NB)
			return (EWOULDBLOCK);
		vp->v_flag |= VLWAIT;
		tsleep((caddr_t)&vp->v_shlockc, PLOCK, SLP_SHLCK, 0);
		goto again;
	}
	if (fp->f_flag & FEXLOCK)
		panic("vn_lock");
	if (cmd & LOCK_EX) {
		cmd &= ~LOCK_SH;
		vp->v_exlockc++;
		vp->v_flag |= VEXLOCK;
		fp->f_flag |= FEXLOCK;
	}
	if ((cmd & LOCK_SH) && (fp->f_flag & FSHLOCK) == 0) {
		vp->v_shlockc++;
		vp->v_flag |= VSHLOCK;
		fp->f_flag |= FSHLOCK;
	}
	return (0);
}

/*
 * Unlock a file.
 */
vn_unlock(fp, kind)
	register struct file *fp;
	int kind;
{
	register struct vnode *vp = (struct vnode *)fp->f_data;
	int flags;

	kind &= fp->f_flag;
	if (vp == NULL || kind == 0)
		return;
	flags = vp->v_flag;
	if (kind & FSHLOCK) {
		if ((flags & VSHLOCK) == 0)
			panic("vn_unlock: SHLOCK");
		if (--vp->v_shlockc == 0) {
			vp->v_flag &= ~VSHLOCK;
			if (flags & VLWAIT)
				wakeup((caddr_t)&vp->v_shlockc);
		}
		fp->f_flag &= ~FSHLOCK;
	}
	if (kind & FEXLOCK) {
		if ((flags & VEXLOCK) == 0)
			panic("vn_unlock: EXLOCK");
		if (--vp->v_exlockc == 0) {
			vp->v_flag &= ~(VEXLOCK|VLWAIT);
			if (flags & VLWAIT)
				wakeup((caddr_t)&vp->v_exlockc);
		}
		fp->f_flag &= ~FEXLOCK;
	}
}

/*
 * vn_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 *	- look up fsid in mount list (if not found ret error)
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if lockflag lock it with VOP_LOCK()
 */
vn_fhtovp(fhp, lockflag, vpp)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
{
	register struct mount *mp;

	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
		return (ESTALE);
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * Noop
 */
vfs_noop()
{

	return (ENXIO);
}

/*
 * Null op
 */
vfs_nullop()
{

	return (0);
}