/* $OpenBSD: vfs_default.c,v 1.7 2001/06/25 03:28:03 csapuntz Exp $ */


/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/event.h>
#include <miscfs/specfs/specdev.h>


extern struct simplelock spechash_slock;

int	filt_generic_readwrite __P((struct knote *kn, long hint));
void	filt_generic_detach __P((struct knote *kn));

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_generic_revoke(v)
        void *v;
{
        struct vop_revoke_args /* {
                struct vnode *a_vp;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp, *vq;
        struct proc *p = curproc;

#ifdef DIAGNOSTIC
        if ((ap->a_flags & REVOKEALL) == 0)
                panic("vop_generic_revoke");
#endif

        vp = ap->a_vp;
        simple_lock(&vp->v_interlock);

        if (vp->v_flag & VALIASED) {
                /*
                 * If a vgone (or vclean) is already in progress,
                 * wait until it is done and return.
                 */
                if (vp->v_flag & VXLOCK) {
                        vp->v_flag |= VXWANT;
                        simple_unlock(&vp->v_interlock);
                        tsleep((caddr_t)vp, PINOD, "vop_generic_revokeall", 0);
                        return(0);
                }
                /*
                 * Ensure that vp will not be vgone'd while we
                 * are eliminating its aliases.
                 */
                vp->v_flag |= VXLOCK;
                simple_unlock(&vp->v_interlock);
                while (vp->v_flag & VALIASED) {
                        simple_lock(&spechash_slock);
                        for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                                if (vq->v_rdev != vp->v_rdev ||
                                    vq->v_type != vp->v_type || vp == vq)
                                        continue;
                                simple_unlock(&spechash_slock);
                                vgone(vq);
                                break;
                        }
                        if (vq == NULLVP)
                                simple_unlock(&spechash_slock);
                }
                /*
                 * Remove the lock so that vgone below will
                 * really eliminate the vnode after which time
                 * vgone will awaken any sleepers.
                 */
                simple_lock(&vp->v_interlock);
                vp->v_flag &= ~VXLOCK;
        }
        vgonel(vp, p);
        return (0);
}
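
/*
 * Illustrative sketch: the routine above is reached through the
 * VOP_REVOKE() interface, and callers are expected to pass REVOKEALL
 * (see the DIAGNOSTIC check).  The function name below is hypothetical.
 */
int
example_revoke_vnode(vp)
        struct vnode *vp;
{
        /* Discard the vnode together with every alias of it. */
        return (VOP_REVOKE(vp, REVOKEALL));
}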

int
vop_generic_bwrite(v)
        void *v;
{
        struct vop_bwrite_args *ap = v;

        return (bwrite(ap->a_bp));
}


int
vop_generic_abortop(v)
        void *v;
{
        struct vop_abortop_args /* {
                struct vnode *a_dvp;
                struct componentname *a_cnp;
        } */ *ap = v;

        if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
                FREE(ap->a_cnp->cn_pnbuf, M_NAMEI);
        return (0);
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_generic_lock(v)
        void *v;
{
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap = v;

#ifdef notyet
        /*
         * This code cannot be used until all the non-locking filesystems
         * (notably NFS) are converted to properly lock and release nodes.
         * Also, certain vnode operations change the locking state within
         * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
         * and symlink).  Ideally these operations should not change the
         * lock state, but should be changed to let the caller of the
         * function unlock them.  Otherwise all intermediate vnode layers
         * (such as union, umapfs, etc) must catch these functions to do
         * the necessary locking at their layer.  Note that the inactive
         * and lookup operations also change their lock state, but this
         * cannot be avoided, so these two operations will always need
         * to be handled in intermediate layers.
         */
        struct vnode *vp = ap->a_vp;
        int vnflags, flags = ap->a_flags;

        if (vp->v_vnlock == NULL) {
                if ((flags & LK_TYPE_MASK) == LK_DRAIN)
                        return (0);
                MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
                    M_VNODE, M_WAITOK);
                lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
        }
        switch (flags & LK_TYPE_MASK) {
        case LK_DRAIN:
                vnflags = LK_DRAIN;
                break;
        case LK_EXCLUSIVE:
        case LK_SHARED:
                vnflags = LK_SHARED;
                break;
        case LK_UPGRADE:
        case LK_EXCLUPGRADE:
        case LK_DOWNGRADE:
                return (0);
        case LK_RELEASE:
        default:
                panic("vop_generic_lock: bad operation %d", flags & LK_TYPE_MASK);
        }
        if (flags & LK_INTERLOCK)
                vnflags |= LK_INTERLOCK;
        return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
        /*
         * Since we are not using the lock manager, we must clear
         * the interlock here.
         */
        if (ap->a_flags & LK_INTERLOCK)
                simple_unlock(&ap->a_vp->v_interlock);
        return (0);
#endif
}

/*
 * Decrement the active use count.
 */

int
vop_generic_unlock(v)
        void *v;
{
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap = v;

        struct vnode *vp = ap->a_vp;

        if (vp->v_vnlock == NULL)
                return (0);
        return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_generic_islocked(v)
        void *v;
{
        struct vop_islocked_args /* {
                struct vnode *a_vp;
        } */ *ap = v;

        struct vnode *vp = ap->a_vp;

        if (vp->v_vnlock == NULL)
                return (0);
        return (lockstatus(vp->v_vnlock));
}

struct filterops generic_filtops =
        { 1, NULL, filt_generic_detach, filt_generic_readwrite };

int
vop_generic_kqfilter(v)
        void *v;
{
        struct vop_kqfilter_args /* {
                struct vnode *a_vp;
                struct knote *a_kn;
        } */ *ap = v;
        struct knote *kn = ap->a_kn;

        switch (kn->kn_filter) {
        case EVFILT_READ:
        case EVFILT_WRITE:
                kn->kn_fop = &generic_filtops;
                break;
        default:
                return (1);
        }

        return (0);
}

void
filt_generic_detach(struct knote *kn)
{
}

int
filt_generic_readwrite(struct knote *kn, long hint)
{
        /*
         * The filesystem is gone, so set the EOF flag and schedule
         * the knote for deletion.
         */
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        kn->kn_data = 0;
        return (1);
}

/* Trivial lease check stub; always succeeds. */
int lease_check(void *);

int
lease_check(void *v)
{
        return (0);
}
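
/*
 * Illustrative sketch: a file system with no locking or revocation logic
 * of its own can point the corresponding slots of its vnode operation
 * vector at the generic routines in this file.  The "example_*" names
 * below are hypothetical; a real file system also fills in its own
 * lookup, read, write, etc. entries and registers the table when it is
 * attached.
 */
int (**example_vnodeop_p) __P((void *));

struct vnodeopv_entry_desc example_vnodeop_entries[] = {
        { &vop_default_desc, vn_default_error },
        { &vop_revoke_desc, vop_generic_revoke },
        { &vop_lock_desc, vop_generic_lock },
        { &vop_unlock_desc, vop_generic_unlock },
        { &vop_islocked_desc, vop_generic_islocked },
        { &vop_abortop_desc, vop_generic_abortop },
        { &vop_bwrite_desc, vop_generic_bwrite },
        { &vop_kqfilter_desc, vop_generic_kqfilter },
        { (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
};

struct vnodeopv_desc example_vnodeop_opv_desc =
        { &example_vnodeop_p, example_vnodeop_entries };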