/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 * $FreeBSD: src/sys/nfs/nfs_node.c,v 1.36.2.3 2002/01/05 22:25:04 dillon Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_node.c,v 1.22 2006/03/27 16:18:39 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fnv_hash.h>

#include <vm/vm_zone.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"

static vm_zone_t nfsnode_zone;
static LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;
static u_long nfsnodehash;

#define TRUE	1
#define	FALSE	0

/*
 * Initialize the hash table used to look up nfsnodes by file handle
 * and create the nfsnode allocation zone.
 */
void
nfs_nhinit(void)
{
	nfsnode_zone = zinit("NFSNODE", sizeof(struct nfsnode), 0, 0, 1);
	nfsnodehashtbl = hashinit(desiredvnodes, M_NFSHASH, &nfsnodehash);
}

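/*
 * Illustrative sketch (not compiled): how nfs_nget() below is expected to
 * map a file handle onto one of the chains created by nfs_nhinit().  The
 * opaque handle bytes are hashed with 32-bit FNV-1 and the result is
 * masked with the value hashinit() stored in nfsnodehash.  The helper
 * name is hypothetical, and the masking shown here is an assumption about
 * the conventional layout; the real lookup goes through the NFSNOHASH()
 * macro rather than this open-coded form.
 */
#if 0
static struct nfsnodehashhead *
nfs_hash_chain_sketch(nfsfh_t *fhp, int fhsize)
{
	u_int32_t fhsum;

	/* 32-bit FNV-1 hash over the raw file handle bytes. */
	fhsum = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);

	/* hashinit() sized the table; nfsnodehash is the index mask. */
	return (&nfsnodehashtbl[fhsum & nfsnodehash]);
}
#endif
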
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * On success a pointer to the nfsnode is returned via *npp and the
 * underlying vnode comes back locked and referenced.
 */
static int nfs_node_hash_lock;

int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np, *np2;
	struct nfsnodehashhead *nhpp;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	int lkflags;
	struct nfsmount *nmp;

	/*
	 * Locate the nfs mount point and figure out whether the rslock
	 * should be interruptible or not.
	 */
	nmp = VFSTONFS(mntp);
	if (nmp->nm_flag & NFSMNT_INT)
		lkflags = LK_PCATCH;
	else
		lkflags = 0;

retry:
	nhpp = NFSNOHASH(fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT));
loop:
	for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) {
			continue;
		}
		vp = NFSTOV(np);
		if (vget(vp, LK_EXCLUSIVE, td))
			goto loop;

		/*
		 * Rescan the chain after the potentially blocking vget();
		 * the node may have been reclaimed while we slept.
		 */
		for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
			if (mntp == NFSTOV(np)->v_mount &&
			    np->n_fhsize == fhsize &&
			    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize) == 0
			) {
				break;
			}
		}
		if (np == NULL || NFSTOV(np) != vp) {
			vput(vp);
			goto loop;
		}
		*npp = np;
		return (0);
	}

	/*
	 * Obtain a lock to prevent a race condition if the getnewvnode()
	 * or MALLOC() below happens to block.
	 */
	if (nfs_node_hash_lock) {
		while (nfs_node_hash_lock) {
			nfs_node_hash_lock = -1;
			tsleep(&nfs_node_hash_lock, 0, "nfsngt", 0);
		}
		goto loop;
	}
	nfs_node_hash_lock = 1;

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = zalloc(nfsnode_zone);

	error = getnewvnode(VT_NFS, mntp, &nvp, 0, LK_NOPAUSE);
	if (error) {
		if (nfs_node_hash_lock < 0)
			wakeup(&nfs_node_hash_lock);
		nfs_node_hash_lock = 0;
		*npp = NULL;
		zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	bzero((caddr_t)np, sizeof *np);
	np->n_vnode = vp;
	vp->v_data = np;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle.
	 * If another thread inserted the same handle while we slept,
	 * throw the new vnode away and retry.
	 */
	for (np2 = nhpp->lh_first; np2 != NULL; np2 = np2->n_hash.le_next) {
		if (mntp != NFSTOV(np2)->v_mount || np2->n_fhsize != fhsize ||
		    bcmp((caddr_t)fhp, (caddr_t)np2->n_fhp, fhsize))
			continue;
		vx_put(vp);
		if (nfs_node_hash_lock < 0)
			wakeup(&nfs_node_hash_lock);
		nfs_node_hash_lock = 0;
		zfree(nfsnode_zone, np);
		goto retry;
	}
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	if (fhsize > NFS_SMALLFH) {
		MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	lockinit(&np->n_rslock, "nfrslk", 0, LK_NOPAUSE | lkflags);

	/*
	 * nvp is locked and referenced, so effectively np is as well.
	 */
	*npp = np;

	if (nfs_node_hash_lock < 0)
		wakeup(&nfs_node_hash_lock);
	nfs_node_hash_lock = 0;

	return (0);
}

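/*
 * Illustrative sketch (not compiled): how a caller with an opaque file
 * handle would typically obtain the corresponding vnode through
 * nfs_nget().  The helper name and the surrounding error handling are
 * hypothetical; the point is only that the vnode reached through the
 * nfsnode is already locked and referenced and is normally disposed of
 * with vput() when the caller is done with it.
 */
#if 0
static int
nfs_nget_usage_sketch(struct mount *mp, nfsfh_t *fhp, int fhsize,
		      struct vnode **vpp)
{
	struct nfsnode *np;
	int error;

	error = nfs_nget(mp, fhp, fhsize, &np);
	if (error) {
		*vpp = NULL;
		return (error);
	}

	/* The vnode comes back exclusively locked and referenced. */
	*vpp = NFSTOV(np);
	return (0);
}
#endif
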
/*
 * nfs_inactive(struct vnode *a_vp, struct thread *a_td)
 *
 * NOTE: the passed vnode is locked but not referenced.  On return the
 * vnode must be unlocked and not referenced.
 */
int
nfs_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	np = VTONFS(ap->a_vp);
	if (prtactive && ap->a_vp->v_usecount != 0)
		vprint("nfs_inactive: pushing active", ap->a_vp);
	if (ap->a_vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else {
		sp = NULL;
	}
	if (sp) {
		/*
		 * We need a reference to keep the vnode from being
		 * recycled by getnewvnode while we do the I/O
		 * associated with discarding the buffers.  The vnode
		 * is already locked.
		 */
		nfs_vinvalbuf(ap->a_vp, 0, ap->a_td, 1);

		/*
		 * Either we have the only ref or we were vgone()'d via
		 * revoke and might have more.
		 */
		KKASSERT(ap->a_vp->v_usecount == 1 ||
			 (ap->a_vp->v_flag & VRECLAIMED));

		/*
		 * Remove the silly file that was rename'd earlier
		 */
		nfs_removeit(sp);
		crfree(sp->s_cred);
		vrele(sp->s_dvp);
		FREE((caddr_t)sp, M_NFSREQ);
	}

	np->n_flag &= ~(NWRITEERR | NACC | NUPD | NCHG | NLOCKED | NWANTED);

	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 *
 * nfs_reclaim(struct vnode *a_vp)
 */
int
nfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	if (prtactive && vp->v_usecount != 0)
		vprint("nfs_reclaim: pushing active", vp);

	if (np->n_hash.le_prev != NULL)
		LIST_REMOVE(np, n_hash);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = np->n_cookies.lh_first;
		while (dp) {
			dp2 = dp;
			dp = dp->ndm_list.le_next;
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_fhsize > NFS_SMALLFH) {
		FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
	}
	if (np->n_rucred) {
		crfree(np->n_rucred);
		np->n_rucred = NULL;
	}
	if (np->n_wucred) {
		crfree(np->n_wucred);
		np->n_wucred = NULL;
	}

	vp->v_data = NULL;
	zfree(nfsnode_zone, np);
	return (0);
}

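/*
 * Illustrative sketch (not compiled): the directory cookie teardown done
 * open-coded in nfs_reclaim() above, expressed with the <sys/queue.h>
 * list macros instead of the raw lh_first/le_next fields.  The helper is
 * hypothetical and only documents the list discipline: each nfsdmap block
 * is detached before it is freed so the traversal never touches freed
 * memory.
 */
#if 0
static void
nfs_free_cookies_sketch(struct nfsnode *np)
{
	struct nfsdmap *dp;

	while ((dp = LIST_FIRST(&np->n_cookies)) != NULL) {
		LIST_REMOVE(dp, ndm_list);
		FREE((caddr_t)dp, M_NFSDIROFF);
	}
}
#endif
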
#if 0
/*
 * Lock an nfsnode
 *
 * nfs_lock(struct vnode *a_vp)
 */
int
nfs_lock(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	/*
	 * Ugh, another place where interruptible mounts will get hung.
	 * If you make this sleep interruptible, then you have to fix all
	 * the VOP_LOCK() calls to expect interruptibility.
	 */
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t)vp, 0, "nfslck", 0);
	}
	if (vp->v_tag == VT_NON)
		return (ENOENT);

#if 0
	/*
	 * Only lock regular files.  If a server crashed while we were
	 * holding a directory lock, we could easily end up sleeping
	 * until the server rebooted while holding a lock on the root.
	 * Locks are only needed for protecting critical sections in
	 * VMIO at the moment.
	 * New vnodes will have type VNON but they should be locked
	 * since they may become VREG.  This is checked in loadattrcache
	 * and unwanted locks are released there.
	 */
	if (vp->v_type == VREG || vp->v_type == VNON) {
		while (np->n_flag & NLOCKED) {
			np->n_flag |= NWANTED;
			(void) tsleep((caddr_t) np, 0, "nfslck2", 0);
			/*
			 * If the vnode has transmuted into a VDIR while we
			 * were asleep, then skip the lock.
			 */
			if (vp->v_type != VREG && vp->v_type != VNON)
				return (0);
		}
		np->n_flag |= NLOCKED;
	}
#endif

	return (0);
}

/*
 * Unlock an nfsnode
 *
 * nfs_unlock(struct vnode *a_vp)
 */
int
nfs_unlock(struct vop_unlock_args *ap)
{
#if 0
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (vp->v_type == VREG || vp->v_type == VNON) {
		if (!(np->n_flag & NLOCKED))
			panic("nfs_unlock: nfsnode not locked");
		np->n_flag &= ~NLOCKED;
		if (np->n_flag & NWANTED) {
			np->n_flag &= ~NWANTED;
			wakeup((caddr_t) np);
		}
	}
#endif

	return (0);
}

/*
 * Check for a locked nfsnode
 *
 * nfs_islocked(struct vnode *a_vp, struct thread *a_td)
 */
int
nfs_islocked(struct vop_islocked_args *ap)
{
	return (VTONFS(ap->a_vp)->n_flag & NLOCKED ? 1 : 0);
}
#endif