/*	$OpenBSD: vfs_default.c,v 1.22 2003/09/01 18:06:03 henning Exp $  */

/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 */ 38 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/proc.h> 43 #include <sys/mount.h> 44 #include <sys/vnode.h> 45 #include <sys/namei.h> 46 #include <sys/malloc.h> 47 #include <sys/event.h> 48 #include <miscfs/specfs/specdev.h> 49 50 51 extern struct simplelock spechash_slock; 52 53 int filt_generic_readwrite(struct knote *kn, long hint); 54 void filt_generic_detach(struct knote *kn); 55 56 /* 57 * Eliminate all activity associated with the requested vnode 58 * and with all vnodes aliased to the requested vnode. 59 */ 60 int 61 vop_generic_revoke(v) 62 void *v; 63 { 64 struct vop_revoke_args /* { 65 struct vnodeop_desc *a_desc; 66 struct vnode *a_vp; 67 int a_flags; 68 } */ *ap = v; 69 struct vnode *vp, *vq; 70 struct proc *p = curproc; 71 72 #ifdef DIAGNOSTIC 73 if ((ap->a_flags & REVOKEALL) == 0) 74 panic("vop_generic_revoke"); 75 #endif 76 77 vp = ap->a_vp; 78 simple_lock(&vp->v_interlock); 79 80 if (vp->v_flag & VALIASED) { 81 /* 82 * If a vgone (or vclean) is already in progress, 83 * wait until it is done and return. 84 */ 85 if (vp->v_flag & VXLOCK) { 86 vp->v_flag |= VXWANT; 87 simple_unlock(&vp->v_interlock); 88 tsleep(vp, PINOD, "vop_generic_revokeall", 0); 89 return(0); 90 } 91 /* 92 * Ensure that vp will not be vgone'd while we 93 * are eliminating its aliases. 
94 */ 95 vp->v_flag |= VXLOCK; 96 simple_unlock(&vp->v_interlock); 97 while (vp->v_flag & VALIASED) { 98 simple_lock(&spechash_slock); 99 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 100 if (vq->v_rdev != vp->v_rdev || 101 vq->v_type != vp->v_type || vp == vq) 102 continue; 103 simple_unlock(&spechash_slock); 104 vgone(vq); 105 break; 106 } 107 simple_unlock(&spechash_slock); 108 } 109 /* 110 * Remove the lock so that vgone below will 111 * really eliminate the vnode after which time 112 * vgone will awaken any sleepers. 113 */ 114 simple_lock(&vp->v_interlock); 115 vp->v_flag &= ~VXLOCK; 116 } 117 vgonel(vp, p); 118 return (0); 119 } 120 121 122 int 123 vop_generic_bwrite(v) 124 void *v; 125 { 126 struct vop_bwrite_args *ap = v; 127 128 return (bwrite(ap->a_bp)); 129 } 130 131 132 int 133 vop_generic_abortop(v) 134 void *v; 135 { 136 struct vop_abortop_args /* { 137 struct vnodeop_desc *a_desc; 138 struct vnode *a_dvp; 139 struct componentname *a_cnp; 140 } */ *ap = v; 141 142 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) 143 FREE(ap->a_cnp->cn_pnbuf, M_NAMEI); 144 return (0); 145 } 146 147 /* 148 * Stubs to use when there is no locking to be done on the underlying object. 149 * A minimal shared lock is necessary to ensure that the underlying object 150 * is not revoked while an operation is in progress. So, an active shared 151 * count is maintained in an auxillary vnode lock structure. 152 */ 153 int 154 vop_generic_lock(v) 155 void *v; 156 { 157 struct vop_lock_args /* { 158 struct vnodeop_desc *a_desc; 159 struct vnode *a_vp; 160 int a_flags; 161 struct proc *a_p; 162 } */ *ap = v; 163 164 #ifdef notyet 165 /* 166 * This code cannot be used until all the non-locking filesystems 167 * (notably NFS) are converted to properly lock and release nodes. 168 * Also, certain vnode operations change the locking state within 169 * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 170 * and symlink). 
Ideally these operations should not change the 171 * lock state, but should be changed to let the caller of the 172 * function unlock them. Otherwise all intermediate vnode layers 173 * (such as union, umapfs, etc) must catch these functions to do 174 * the necessary locking at their layer. Note that the inactive 175 * and lookup operations also change their lock state, but this 176 * cannot be avoided, so these two operations will always need 177 * to be handled in intermediate layers. 178 */ 179 struct vnode *vp = ap->a_vp; 180 int vnflags, flags = ap->a_flags; 181 182 if (vp->v_vnlock == NULL) { 183 if ((flags & LK_TYPE_MASK) == LK_DRAIN) 184 return (0); 185 MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 186 M_VNODE, M_WAITOK); 187 lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0); 188 } 189 switch (flags & LK_TYPE_MASK) { 190 case LK_DRAIN: 191 vnflags = LK_DRAIN; 192 break; 193 case LK_EXCLUSIVE: 194 case LK_SHARED: 195 vnflags = LK_SHARED; 196 break; 197 case LK_UPGRADE: 198 case LK_EXCLUPGRADE: 199 case LK_DOWNGRADE: 200 return (0); 201 case LK_RELEASE: 202 default: 203 panic("vop_generic_lock: bad operation %d", flags & LK_TYPE_MASK); 204 } 205 if (flags & LK_INTERLOCK) 206 vnflags |= LK_INTERLOCK; 207 return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 208 #else /* for now */ 209 /* 210 * Since we are not using the lock manager, we must clear 211 * the interlock here. 212 */ 213 if (ap->a_flags & LK_INTERLOCK) 214 simple_unlock(&ap->a_vp->v_interlock); 215 return (0); 216 #endif 217 } 218 219 /* 220 * Decrement the active use count. 
221 */ 222 223 int 224 vop_generic_unlock(v) 225 void *v; 226 { 227 struct vop_unlock_args /* { 228 struct vnodeop_desc *a_desc; 229 struct vnode *a_vp; 230 int a_flags; 231 struct proc *a_p; 232 } */ *ap = v; 233 234 struct vnode *vp = ap->a_vp; 235 236 if (vp->v_vnlock == NULL) 237 return (0); 238 return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p)); 239 } 240 241 /* 242 * Return whether or not the node is in use. 243 */ 244 int 245 vop_generic_islocked(v) 246 void *v; 247 { 248 struct vop_islocked_args /* { 249 struct vnodeop_desc *a_desc; 250 struct vnode *a_vp; 251 } */ *ap = v; 252 253 struct vnode *vp = ap->a_vp; 254 255 if (vp->v_vnlock == NULL) 256 return (0); 257 return (lockstatus(vp->v_vnlock)); 258 } 259 260 struct filterops generic_filtops = 261 { 1, NULL, filt_generic_detach, filt_generic_readwrite }; 262 263 int 264 vop_generic_kqfilter(v) 265 void *v; 266 { 267 struct vop_kqfilter_args /* { 268 struct vnodeop_desc *a_desc; 269 struct vnode *a_vp; 270 struct knote *a_kn; 271 } */ *ap = v; 272 struct knote *kn = ap->a_kn; 273 274 switch (kn->kn_filter) { 275 case EVFILT_READ: 276 case EVFILT_WRITE: 277 kn->kn_fop = &generic_filtops; 278 break; 279 default: 280 return (1); 281 } 282 283 return (0); 284 } 285 286 void 287 filt_generic_detach(struct knote *kn) 288 { 289 } 290 291 int 292 filt_generic_readwrite(struct knote *kn, long hint) 293 { 294 /* 295 * filesystem is gone, so set the EOF flag and schedule 296 * the knote for deletion. 297 */ 298 if (hint == NOTE_REVOKE) { 299 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 300 return (1); 301 } 302 303 kn->kn_data = 0; 304 return (1); 305 } 306 307 int lease_check(void *); 308 309 int 310 lease_check(void *v) 311 { 312 return (0); 313 } 314 315 /* 316 * vfs default ops 317 * used to fill the vfs function table to get reasonable default return values. 
318 */ 319 int 320 vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td) 321 struct mount *mp; 322 int cmd; 323 struct vnode *filename_vp; 324 int attrnamespace; 325 const char *attrname; 326 struct proc *td; 327 { 328 return(EOPNOTSUPP); 329 } 330