/*	$NetBSD: nfs_node.c,v 1.101 2008/01/30 09:50:24 ad Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.101 2008/01/30 09:50:24 ad Exp $");

#include "opt_nfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/kauth.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>

struct nfsnodehashhead *nfsnodehashtbl;
u_long nfsnodehash;
static kmutex_t nfs_hashlock;

POOL_INIT(nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
    &pool_allocator_nointr, IPL_NONE);
POOL_INIT(nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
    &pool_allocator_nointr, IPL_NONE);

MALLOC_DEFINE(M_NFSNODE, "NFS node", "NFS vnode private part");

extern int prtactive;

#define	nfs_hash(x,y)	hash32_buf((x), (y), HASH32_BUF_INIT)

void nfs_gop_size(struct vnode *, off_t, off_t *, int);
int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
int nfs_gop_write(struct vnode *, struct vm_page **, int, int);

static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};

/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit()
{

	nfsnodehashtbl = hashinit(desiredvnodes, HASH_LIST, M_NFSNODE,
	    M_WAITOK, &nfsnodehash);
	mutex_init(&nfs_hashlock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Reinitialize the nfsnode hash table.
 */

void
nfs_nhreinit()
{
	struct nfsnode *np;
	struct nfsnodehashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_NFSNODE, M_WAITOK,
	    &mask);

	mutex_enter(&nfs_hashlock);
	oldhash = nfsnodehashtbl;
	oldmask = nfsnodehash;
	nfsnodehashtbl = hash;
	nfsnodehash = mask;
	/* Rehash every existing nfsnode into the new table. */
	for (i = 0; i <= oldmask; i++) {
		while ((np = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(np, n_hash);
			val = NFSNOHASH(nfs_hash(np->n_fhp, np->n_fhsize));
			LIST_INSERT_HEAD(&hash[val], np, n_hash);
		}
	}
	mutex_exit(&nfs_hashlock);
	hashdone(oldhash, M_NFSNODE);
}

/*
 * Free resources previously allocated in nfs_nhinit().
 */
void
nfs_nhdone()
{
	hashdone(nfsnodehashtbl, M_NFSNODE);
	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	mutex_destroy(&nfs_hashlock);
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an
 * nfsnode structure is returned.
 * (A hedged, illustrative usage sketch appears in a comment at the end
 * of this file.)
 */
int
nfs_nget1(mntp, fhp, fhsize, npp, lkflags)
	struct mount *mntp;
	nfsfh_t *fhp;
	int fhsize;
	struct nfsnode **npp;
	int lkflags;
{
	struct nfsnode *np, *np2;
	struct nfsnodehashhead *nhpp;
	struct vnode *vp;
	int error;

	nhpp = &nfsnodehashtbl[NFSNOHASH(nfs_hash(fhp, fhsize))];
loop:
	mutex_enter(&nfs_hashlock);
	LIST_FOREACH(np, nhpp, n_hash) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    memcmp(fhp, np->n_fhp, fhsize))
			continue;
		vp = NFSTOV(np);
		mutex_enter(&vp->v_interlock);
		mutex_exit(&nfs_hashlock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	mutex_exit(&nfs_hashlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * While the hash lock was dropped, another thread may have
	 * entered a node for the same file handle; if so, back out our
	 * allocations and retry the lookup.
	 */
	mutex_enter(&nfs_hashlock);
	LIST_FOREACH(np2, nhpp, n_hash) {
		if (mntp != NFSTOV(np2)->v_mount || np2->n_fhsize != fhsize ||
		    memcmp(fhp, np2->n_fhp, fhsize))
			continue;
		mutex_exit(&nfs_hashlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	vlockmgr(&vp->v_lock, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	mutex_exit(&nfs_hashlock);

	*npp = np;
	return (0);
}

int
nfs_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp, 0);

	if (sp != NULL) {
		int error;

		/*
		 * Remove the silly file that was rename'd earlier
		 *
		 * Just in case our thread also has the parent node locked,
		 * we use LK_CANRECURSE.
		 */

		error = vn_lock(sp->s_dvp, LK_EXCLUSIVE | LK_CANRECURSE);
		if (error || sp->s_dvp->v_data == NULL) {
			/* XXX should recover */
			printf("%s: vp=%p error=%d\n",
			    __func__, sp->s_dvp, error);
		} else {
			nfs_removeit(sp);
		}
		kauth_cred_free(sp->s_cred);
		vput(sp->s_dvp);
		kmem_free(sp, sizeof(*sp));
	}

	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	mutex_enter(&nfs_hashlock);
	LIST_REMOVE(np, n_hash);
	mutex_exit(&nfs_hashlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache)
		hashdone(np->n_dircache, M_NFSDIROFF);
	KASSERT(np->n_dirgens == NULL);

	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	cache_purge(vp);
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}

void
nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{

	*eobp = MAX(size, vp->v_size);
}

int
nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    kauth_cred_t cred)
{

	return 0;
}

int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	return genfs_gop_write(vp, pgs, npages, flags);
}
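
/*
 * Illustrative only: a minimal sketch, not part of this file's code, of
 * how a caller might obtain an nfsnode with nfs_nget1().  The helper
 * name example_fh_to_vnode() and its arguments are hypothetical; the
 * sketch assumes the contract implemented above, i.e. on success *npp
 * refers to an nfsnode whose vnode is referenced and exclusively
 * locked, so the caller releases it with vput() when finished.
 *
 *	static int
 *	example_fh_to_vnode(struct mount *mp, nfsfh_t *fhp, int fhsize,
 *	    struct vnode **vpp)
 *	{
 *		struct nfsnode *np;
 *		int error;
 *
 *		error = nfs_nget1(mp, fhp, fhsize, &np, 0);
 *		if (error)
 *			return error;
 *		*vpp = NFSTOV(np);
 *		return 0;
 *	}
 *
 * The resulting vnode would typically be handed to VOP operations and
 * then released with vput(*vpp).
 */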