1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed 6 * to Berkeley by John Heidemann of the UCLA Ficus project. 7 * 8 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * 39 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $ 40 * $DragonFly: src/sys/kern/vfs_default.c,v 1.13 2004/08/28 19:02:05 dillon Exp $ 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/buf.h> 46 #include <sys/conf.h> 47 #include <sys/kernel.h> 48 #include <sys/lock.h> 49 #include <sys/malloc.h> 50 #include <sys/mount.h> 51 #include <sys/unistd.h> 52 #include <sys/vnode.h> 53 #include <sys/poll.h> 54 55 #include <machine/limits.h> 56 57 #include <vm/vm.h> 58 #include <vm/vm_object.h> 59 #include <vm/vm_page.h> 60 #include <vm/vm_pager.h> 61 #include <vm/vnode_pager.h> 62 63 static int vop_nolookup (struct vop_lookup_args *); 64 static int vop_nostrategy (struct vop_strategy_args *); 65 66 /* 67 * This vnode table stores what we want to do if the filesystem doesn't 68 * implement a particular VOP. 69 * 70 * If there is no specific entry here, we will return EOPNOTSUPP. 
71 */ 72 struct vop_ops *default_vnode_vops; 73 static struct vnodeopv_entry_desc default_vnodeop_entries[] = { 74 { &vop_default_desc, vop_eopnotsupp }, 75 { &vop_advlock_desc, vop_einval }, 76 { &vop_bwrite_desc, (void *) vop_stdbwrite }, 77 { &vop_close_desc, vop_null }, 78 { &vop_createvobject_desc, (void *) vop_stdcreatevobject }, 79 { &vop_destroyvobject_desc, (void *) vop_stddestroyvobject }, 80 { &vop_fsync_desc, vop_null }, 81 { &vop_getvobject_desc, (void *) vop_stdgetvobject }, 82 { &vop_ioctl_desc, vop_enotty }, 83 { &vop_islocked_desc, (void *) vop_stdislocked }, 84 { &vop_lease_desc, vop_null }, 85 { &vop_lock_desc, (void *) vop_stdlock }, 86 { &vop_mmap_desc, vop_einval }, 87 { &vop_lookup_desc, (void *) vop_nolookup }, 88 { &vop_open_desc, vop_null }, 89 { &vop_pathconf_desc, vop_einval }, 90 { &vop_poll_desc, (void *) vop_nopoll }, 91 { &vop_readlink_desc, vop_einval }, 92 { &vop_reallocblks_desc, vop_eopnotsupp }, 93 { &vop_revoke_desc, (void *) vop_stdrevoke }, 94 { &vop_strategy_desc, (void *) vop_nostrategy }, 95 { &vop_unlock_desc, (void *) vop_stdunlock }, 96 { &vop_getacl_desc, vop_eopnotsupp }, 97 { &vop_setacl_desc, vop_eopnotsupp }, 98 { &vop_aclcheck_desc, vop_eopnotsupp }, 99 { &vop_getextattr_desc, vop_eopnotsupp }, 100 { &vop_setextattr_desc, vop_eopnotsupp }, 101 { NULL, NULL } 102 }; 103 104 static struct vnodeopv_desc default_vnodeop_opv_desc = 105 { &default_vnode_vops, default_vnodeop_entries }; 106 107 VNODEOP_SET(default_vnodeop_opv_desc); 108 109 int 110 vop_eopnotsupp(struct vop_generic_args *ap) 111 { 112 /* 113 printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 114 */ 115 return (EOPNOTSUPP); 116 } 117 118 int 119 vop_ebadf(struct vop_generic_args *ap) 120 { 121 return (EBADF); 122 } 123 124 int 125 vop_enotty(struct vop_generic_args *ap) 126 { 127 return (ENOTTY); 128 } 129 130 int 131 vop_einval(struct vop_generic_args *ap) 132 { 133 return (EINVAL); 134 } 135 136 int 137 vop_null(struct vop_generic_args *ap) 138 { 139 
return (0); 140 } 141 142 int 143 vop_defaultop(struct vop_generic_args *ap) 144 { 145 return (VOCALL(default_vnode_vops, ap)); 146 } 147 148 int 149 vop_panic(struct vop_generic_args *ap) 150 { 151 152 panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name); 153 } 154 155 static int 156 vop_nolookup(ap) 157 struct vop_lookup_args /* { 158 struct vnode *a_dvp; 159 struct vnode **a_vpp; 160 struct componentname *a_cnp; 161 } */ *ap; 162 { 163 164 *ap->a_vpp = NULL; 165 return (ENOTDIR); 166 } 167 168 /* 169 * vop_nostrategy: 170 * 171 * Strategy routine for VFS devices that have none. 172 * 173 * B_ERROR and B_INVAL must be cleared prior to calling any strategy 174 * routine. Typically this is done for a B_READ strategy call. Typically 175 * B_INVAL is assumed to already be clear prior to a write and should not 176 * be cleared manually unless you just made the buffer invalid. B_ERROR 177 * should be cleared either way. 178 */ 179 180 static int 181 vop_nostrategy (struct vop_strategy_args *ap) 182 { 183 printf("No strategy for buffer at %p\n", ap->a_bp); 184 vprint("", ap->a_vp); 185 vprint("", ap->a_bp->b_vp); 186 ap->a_bp->b_flags |= B_ERROR; 187 ap->a_bp->b_error = EOPNOTSUPP; 188 biodone(ap->a_bp); 189 return (EOPNOTSUPP); 190 } 191 192 int 193 vop_stdpathconf(ap) 194 struct vop_pathconf_args /* { 195 struct vnode *a_vp; 196 int a_name; 197 int *a_retval; 198 } */ *ap; 199 { 200 201 switch (ap->a_name) { 202 case _PC_LINK_MAX: 203 *ap->a_retval = LINK_MAX; 204 return (0); 205 case _PC_MAX_CANON: 206 *ap->a_retval = MAX_CANON; 207 return (0); 208 case _PC_MAX_INPUT: 209 *ap->a_retval = MAX_INPUT; 210 return (0); 211 case _PC_PIPE_BUF: 212 *ap->a_retval = PIPE_BUF; 213 return (0); 214 case _PC_CHOWN_RESTRICTED: 215 *ap->a_retval = 1; 216 return (0); 217 case _PC_VDISABLE: 218 *ap->a_retval = _POSIX_VDISABLE; 219 return (0); 220 default: 221 return (EINVAL); 222 } 223 /* NOTREACHED */ 224 } 225 226 /* 227 * Standard lock. 
 * The lock is recursive-capable only if the lock was
 * initialized with LK_CANRECURSE or that flag is passed in a_flags.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	int error;

#ifndef DEBUG_LOCKS
	/* Acquire the vnode's embedded lock with the caller's flags. */
	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags,
	    ap->a_vlock, ap->a_td);
#else
	/*
	 * Debug build: debuglockmgr() additionally records a call-site
	 * name and the file/line stashed on the vnode so lock ordering
	 * problems can be traced back to their origin.
	 */
	error = debuglockmgr(&ap->a_vp->v_lock, ap->a_flags,
	    ap->a_vlock, ap->a_td,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line);
#endif
	return(error);
}

/*
 * Standard unlock: release the vnode's embedded lock.  LK_RELEASE is
 * OR'd into whatever flags the caller passed.
 */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	int error;

	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags | LK_RELEASE,
	    ap->a_vlock, ap->a_td);
	return(error);
}

/*
 * Report the state of the vnode's embedded lock with respect to the
 * given thread (see lockstatus() for the possible return values).
 */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Non-standard events get a poll record attached to the vnode;
	 * plain read/write readiness is reported immediately.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Default bwrite: synchronously write the buffer via bwrite().
 */
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Create (or find) the VM object backing a vnode so that it can be
 * memory-mapped and paged.  Regular files and directories get an
 * object sized from the file's attributes; disk devices get the
 * largest object possible.  Returns 0 on success or if the vnode
 * cannot do VMIO at all; otherwise an errno from VOP_GETATTR().
 */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	/* Nothing to do for vnodes that cannot be memory-mapped. */
	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			/* Size the object from the file's current length. */
			if ((error = VOP_GETATTR(vp, &vat, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vp->v_rdev && dev_is_good(vp->v_rdev)) {
			/*
			 * XXX v_rdev uses NULL/non-NULL instead of NODEV
			 *
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			/*
			 * An old object is still being torn down; drop the
			 * vnode lock, wait for the teardown to finish, then
			 * start over.
			 */
			VOP_UNLOCK(vp, NULL, 0, td);
			tsleep(object, 0, "vodead", 0);
			vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

/*
 * Tear down the VM object backing a vnode.  A no-op if there is no
 * object; otherwise terminate it (if unreferenced and not already
 * dead) or just drop the pager association.
 */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_object's use zalloc
 * and thus stable-storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	/* The output pointer is optional; EINVAL means "no object". */
	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */

/* Default mount: succeed without doing anything. */
int
vfs_stdmount(struct mount *mp, char *path, caddr_t data,
	struct nameidata *ndp, struct thread *td)
{
	return (0);
}

/* Default unmount: succeed without doing anything. */
int
vfs_stdunmount(struct mount *mp, int mntflags, struct thread *td)
{
	return (0);
}

/* Default root lookup: not supported. */
int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

/* Default statfs: not supported. */
int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{
	return (EOPNOTSUPP);
}

/* Default vnode-to-filehandle conversion: not supported. */
int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

/* Default start: nothing to do. */
int
vfs_stdstart(struct mount *mp, int flags, struct thread *td)
{
	return (0);
}

/* Default quotactl: quotas not supported. */
int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid,
	caddr_t arg, struct thread *td)
{
	return (EOPNOTSUPP);
}

/* Default sync: nothing to flush, report success. */
int
vfs_stdsync(struct mount *mp, int waitfor, struct thread *td)
{
	return (0);
}

/* Default vget (inode-to-vnode): not supported. */
int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

/* Default filehandle-to-vnode conversion: not supported. */
int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

/* Default NFS export check: exporting not supported. */
int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
	struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}

/* Default VFS init: nothing to set up. */
int
vfs_stdinit(struct vfsconf *vfsp)
{
	return (0);
}

/* Default VFS uninit: nothing to tear down. */
int
vfs_stduninit(struct vfsconf *vfsp)
{
	return(0);
}

/* Default extended-attribute control: not supported. */
int
vfs_stdextattrctl(struct mount *mp, int cmd, const char *attrname,
	caddr_t arg, struct thread *td)
{
	return(EOPNOTSUPP);
}

/* end of vfs default ops */