/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $
 * $DragonFly: src/sys/kern/vfs_default.c,v 1.15 2004/09/30 18:59:48 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup (struct vop_lookup_args *);
static int	vop_noresolve (struct vop_resolve_args *);
static int	vop_nostrategy (struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */
struct vop_ops *default_vnode_vops;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		vop_eopnotsupp },
	{ &vop_advlock_desc,		vop_einval },
	{ &vop_bwrite_desc,		(void *) vop_stdbwrite },
	{ &vop_close_desc,		vop_null },
	{ &vop_createvobject_desc,	(void *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(void *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		vop_null },
	{ &vop_getvobject_desc,		(void *) vop_stdgetvobject },
	{ &vop_ioctl_desc,		vop_enotty },
	{ &vop_islocked_desc,		(void *) vop_stdislocked },
	{ &vop_lease_desc,		vop_null },
	{ &vop_lock_desc,		(void *) vop_stdlock },
	{ &vop_mmap_desc,		vop_einval },
	{ &vop_resolve_desc,		(void *) vop_noresolve },
	{ &vop_lookup_desc,		(void *) vop_nolookup },
	{ &vop_open_desc,		vop_null },
	{ &vop_pathconf_desc,		vop_einval },
	{ &vop_poll_desc,		(void *) vop_nopoll },
	{ &vop_readlink_desc,		vop_einval },
	{ &vop_reallocblks_desc,	vop_eopnotsupp },
	{ &vop_revoke_desc,		(void *) vop_stdrevoke },
	{ &vop_strategy_desc,		(void *) vop_nostrategy },
	{ &vop_unlock_desc,		(void *) vop_stdunlock },
	{ &vop_getacl_desc,		vop_eopnotsupp },
	{ &vop_setacl_desc,		vop_eopnotsupp },
	{ &vop_aclcheck_desc,		vop_eopnotsupp },
	{ &vop_getextattr_desc,		vop_eopnotsupp },
	{ &vop_setextattr_desc,		vop_eopnotsupp },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnode_vops, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
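
/*
 * Example (sketch only, not part of this file): a filesystem falls back
 * to the table above by routing its own default entry through
 * vop_defaultop() (defined below), which VOCALLs into default_vnode_vops.
 * The "myfs" names and myfs_read are hypothetical:
 *
 *	static struct vop_ops *myfs_vnode_vops;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	vop_defaultop },
 *		{ &vop_read_desc,	(void *) myfs_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnode_vops, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */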

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{
	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{
	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{
	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{
	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{
	return (VOCALL(default_vnode_vops, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{
	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_noresolve { struct namecache *a_ncp }	XXX STOPGAP FUNCTION
 *
 * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
 * WILL BE REMOVED.  This procedure exists for all VFSs which have not
 * yet implemented vop_resolve().  It converts vop_resolve() into a
 * vop_lookup() and does appropriate translations.
 *
 * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * vop_resolve() is far less complex than the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach
 * the vnode to the entry.  If the entry represents a non-existent node
 * then cache_setvp() is called with a NULL vnode to resolve the entry
 * into a negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 */
static int
vop_noresolve(struct vop_resolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct namecache *ncp;
	struct componentname cnp;

	ncp = ap->a_ncp;		/* locked namecache node */
	if (ncp->nc_flag & NCF_MOUNTPT)	/* can't cross a mount point! */
		return(EPERM);
	if (ncp->nc_parent == NULL)
		return(EPERM);
	if ((dvp = ncp->nc_parent->nc_vp) == NULL)
		return(EPERM);
	vget(dvp, NULL, LK_EXCLUSIVE, curthread);

	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = CNP_ISLASTCN;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread;	/* XXX */

	/*
	 * vop_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0) {
		KKASSERT(vp != NULL);
		cache_setvp(ncp, vp);
		vput(vp);
	} else if (error == ENOENT) {
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(ncp, NULL);
	}
	if (cnp.cn_flags & CNP_PDIRUNLOCK)
		vrele(dvp);
	else
		vput(dvp);
	return (error);
}

#if 0

/*
 * vop_nonremove { struct namecache *a_ncp }	XXX STOPGAP FUNCTION
 *
 * Remove the file/dir represented by a_ncp.
 *
 * XXX ultra difficult.  A number of existing filesystems, including UFS,
 * assume that the directory will remain locked and the lookup will
 * store the directory offset and other things in the directory inode
 * for the later VOP_REMOVE to use.  We have to move all that
 * functionality into e.g. UFS's VOP_REMOVE itself.
 */
static int
vop_nonremove(struct vop_nremove_args *ap)
{
	struct namecache *ncfile;
	struct namecache *ncdir;
	struct componentname cnd;
	struct vnode *vp;
	struct vnode *vpd;
	thread_t td;
	int error;

	td = curthread;
	ncfile = ap->a_ncp;
	ncdir = ncfile->nc_parent;

	if ((error = cache_vget(ncdir, ap->a_cred, LK_EXCLUSIVE, &vpd)) != 0)
		return (error);
	if ((error = cache_vget(ncfile, ap->a_cred, LK_EXCLUSIVE, &vp)) != 0) {
		vput(vpd);
		return (error);
	}
	bzero(&cnd, sizeof(cnd));
	cnd.cn_nameiop = NAMEI_DELETE;
	cnd.cn_td = td;
	cnd.cn_cred = ap->a_cred;
	cnd.cn_nameptr = ncfile->nc_name;
	cnd.cn_namelen = ncfile->nc_nlen;
	error = VOP_REMOVE(vpd, NCPNULL, vp, &cnd);
	vput(vp);
	vput(vpd);

	/*
	 * Re-resolve the ncp to match the fact that the file has been
	 * deleted from the namespace.  If an error occurred leave the ncp
	 * unresolved (meaning that we have no idea what the correct state
	 * is).
	 */
	cache_setunresolved(ncfile);
	if (error == 0)
		cache_setvp(ncfile, NULL);
	return (error);
}

#endif

static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine; this is typically done for a B_READ strategy call.
 *	B_INVAL is assumed to already be clear prior to a write and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	B_ERROR should be cleared either way.
 */
static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}
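
/*
 * Example (hypothetical caller, sketch only, not part of this file):
 * the flag handling described above for a read-style strategy call
 * might look like this.  Exact buffer setup varies by caller:
 *
 *	bp->b_flags &= ~(B_ERROR | B_INVAL);	clear before strategy
 *	bp->b_flags |= B_READ;
 *	VOP_STRATEGY(vp, bp);
 *	error = biowait(bp);
 */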

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock.  The lock is recursive-capable only if the lock was
 * initialized with LK_CANRECURSE or that flag is passed in a_flags.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	int error;

#ifndef DEBUG_LOCKS
	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags,
			ap->a_vlock, ap->a_td);
#else
	error = debuglockmgr(&ap->a_vp->v_lock, ap->a_flags,
			ap->a_vlock, ap->a_td,
			"vop_stdlock", ap->a_vp->filename, ap->a_vp->line);
#endif
	return(error);
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	int error;

	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags | LK_RELEASE,
			ap->a_vlock, ap->a_td);
	return(error);
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
}
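
/*
 * Example (hypothetical, sketch only, not part of this file): per the
 * comment on vop_stdlock() above, a caller that may already hold the
 * vnode lock can request recursion at acquire time by passing
 * LK_CANRECURSE down through a_flags, e.g. via vn_lock():
 *
 *	error = vn_lock(vp, NULL,
 *			LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE, td);
 */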

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vp->v_rdev && dev_is_good(vp->v_rdev)) {
			/*
			 * XXX v_rdev uses NULL/non-NULL instead of NODEV
			 *
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but
			 * doesn't cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX),
						   0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, NULL, 0, td);
			tsleep(object, 0, "vodead", 0);
			vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
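
/*
 * Example (hypothetical caller, sketch only, not part of this file):
 * the quick dirty-object test described above might look like this.
 * OBJ_MIGHTBEDIRTY is how syncer-style checks are usually phrased,
 * but consult the actual callers:
 *
 *	vm_object_t obj;
 *
 *	if (VOP_GETVOBJECT(vp, &obj) == 0 &&
 *	    (obj->flags & OBJ_MIGHTBEDIRTY)) {
 *		... go heavy-weight: lock the vnode and flush pages ...
 *	}
 */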

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default
 * return values.
 */
int
vfs_stdmount(struct mount *mp, char *path, caddr_t data,
	struct nameidata *ndp, struct thread *td)
{
	return (0);
}

int
vfs_stdunmount(struct mount *mp, int mntflags, struct thread *td)
{
	return (0);
}

int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart(struct mount *mp, int flags, struct thread *td)
{
	return (0);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid,
	caddr_t arg, struct thread *td)
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor, struct thread *td)
{
	return (0);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
	struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{
	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{
	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, const char *attrname,
	caddr_t arg, struct thread *td)
{
	return (EOPNOTSUPP);
}

/* end of vfs default ops */
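
/*
 * Example (hypothetical "myfs", sketch only, not part of this file):
 * a filesystem plugs the vfs_std* defaults above into the vfsops slots
 * it does not implement.  See <sys/mount.h> for the authoritative
 * struct vfsops layout; the ordering below is illustrative:
 *
 *	static struct vfsops myfs_vfsops = {
 *		myfs_mount, vfs_stdstart, myfs_unmount, myfs_root,
 *		vfs_stdquotactl, myfs_statfs, vfs_stdsync, vfs_stdvget,
 *		vfs_stdfhtovp, vfs_stdcheckexp, vfs_stdvptofh,
 *		vfs_stdinit, vfs_stduninit, vfs_stdextattrctl
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */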