/*  $NetBSD: nfs_clnode.c,v 1.3 2016/12/13 22:17:33 pgoyette Exp $  */
/*-
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from nfs_node.c 8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clnode.c 302210 2016-06-26 14:18:28Z kib "); */
__RCSID("$NetBSD: nfs_clnode.c,v 1.3 2016/12/13 22:17:33 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <fs/nfs/common/nfsport.h>
#include <fs/nfs/client/nfsnode.h>
#include <fs/nfs/client/nfsmount.h>
#include <fs/nfs/client/nfs.h>
#include <fs/nfs/client/nfs_kdtrace.h>

#include <fs/nfs/common/nfs_lock.h>

extern struct vop_vector newnfs_vnodeops;
extern struct buf_ops buf_ops_newnfs;
MALLOC_DECLARE(M_NEWNFSREQ);

uma_zone_t newnfsnode_zone;

const char nfs_vnode_tag[] = "nfs";

static void nfs_freesillyrename(void *arg);

/*
 * Create the UMA zone from which nfsnode structures are allocated.
 */
void
ncl_nhinit(void)
{

        newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL,
            NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * Destroy the nfsnode zone.
 */
void
ncl_nhuninit(void)
{
        uma_zdestroy(newnfsnode_zone);
}

/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to the nfsnode structure is returned via *npp.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
        struct thread *td = curthread;  /* XXX */
        struct nfsnode *np;
        struct vnode *vp;
        struct vnode *nvp;
        int error;
        u_int hash;
        struct nfsmount *nmp;
        struct nfsfh *nfhp;

        nmp = VFSTONFS(mntp);
        *npp = NULL;

        hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

        MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
            M_NFSFH, M_WAITOK);
        bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
        nfhp->nfh_len = fhsize;
        error = vfs_hash_get(mntp, hash, lkflags,
            td, &nvp, newnfs_vncmpf, nfhp);
        FREE(nfhp, M_NFSFH);
        if (error)
                return (error);
        if (nvp != NULL) {
                *npp = VTONFS(nvp);
                return (0);
        }
        np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

        error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
        if (error) {
                uma_zfree(newnfsnode_zone, np);
                return (error);
        }
        vp = nvp;
        KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
        vp->v_bufobj.bo_ops = &buf_ops_newnfs;
        vp->v_data = np;
        np->n_vnode = vp;
        /*
         * Initialize the mutex even if the vnode is going to be a loser.
         * This simplifies the logic in reclaim, which can then unconditionally
         * destroy the mutex (in the case of the loser, or if hash_insert
         * happened to return an error no special casing is needed).
         */
        mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
        /*
         * NFS supports recursive and shared locking.
         */
        lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
        VN_LOCK_AREC(vp);
        VN_LOCK_ASHARE(vp);
        /*
         * Are we getting the root? If so, make sure the vnode flags
         * are correct
         */
        if ((fhsize == nmp->nm_fhsize) &&
            !bcmp(fhp, nmp->nm_fh, fhsize)) {
                if (vp->v_type == VNON)
                        vp->v_type = VDIR;
                vp->v_vflag |= VV_ROOT;
        }

        MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
            M_NFSFH, M_WAITOK);
        bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
        np->n_fhp->nfh_len = fhsize;
        error = insmntque(vp, mntp);
        if (error != 0) {
                *npp = NULL;
                FREE((caddr_t)np->n_fhp, M_NFSFH);
                mtx_destroy(&np->n_mtx);
                uma_zfree(newnfsnode_zone, np);
                return (error);
        }
        error = vfs_hash_insert(vp, hash, lkflags,
            td, &nvp, newnfs_vncmpf, np->n_fhp);
        if (error)
                return (error);
        if (nvp != NULL) {
                *npp = VTONFS(nvp);
                /* vfs_hash_insert() vput()'s the losing vnode */
                return (0);
        }
        *npp = np;

        return (0);
}

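/*
 * Usage sketch: as noted above, ncl_nget() is only expected to be used
 * to obtain the root vnode, so a caller such as the mount's VFS_ROOT()
 * path would invoke it roughly as follows, handing it the file handle
 * saved in the nfsmount at mount time (the names below are illustrative
 * only):
 *
 *      error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
 *      if (error == 0)
 *              *vpp = NFSTOV(np);
 */
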
/*
 * Do the vrele(sp->s_dvp) as a separate task in order to avoid a
 * deadlock because of a LOR when vrele() locks the directory vnode.
 */
static void
nfs_freesillyrename(void *arg)
{
        struct sillyrename *sp;

        sp = arg;
        vrele(sp->s_dvp);
        free(sp, M_NEWNFSREQ);
}

/*
 * Remove any silly-renamed file still associated with this vnode.
 * Called with the vnode exclusively locked and the nfsnode mutex held;
 * the mutex is dropped and re-acquired around the remove.
 */
static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
        struct nfsnode *np;
        struct sillyrename *sp;

        ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
        np = VTONFS(vp);
        mtx_assert(&np->n_mtx, MA_OWNED);
        if (vp->v_type != VDIR) {
                sp = np->n_sillyrename;
                np->n_sillyrename = NULL;
        } else
                sp = NULL;
        if (sp != NULL) {
                mtx_unlock(&np->n_mtx);
                (void) ncl_vinvalbuf(vp, 0, td, 1);
                /*
                 * Remove the silly file that was rename'd earlier
                 */
                ncl_removeit(sp, vp);
                crfree(sp->s_cred);
                sysmon_task_queue_sched(0, nfs_freesillyrename, sp);
                mtx_lock(&np->n_mtx);
        }
}

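/*
 * VOP_INACTIVE: the vnode is no longer in use.  For NFSv4 regular files,
 * flush any dirty pages/buffers and perform the Close that was delayed
 * past VOP_CLOSE(); then discard a pending silly rename and clear all
 * flags except NMODIFIED.
 */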
int
ncl_inactive(struct vop_inactive_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct nfsnode *np;
        boolean_t retv;

        if (NFS_ISV4(vp) && vp->v_type == VREG) {
                /*
                 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
                 * Close operations are delayed until now. Any dirty
                 * buffers/pages must be flushed before the close, so that the
                 * stateid is available for the writes.
                 */
                if (vp->v_object != NULL) {
                        VM_OBJECT_WLOCK(vp->v_object);
                        retv = vm_object_page_clean(vp->v_object, 0, 0,
                            OBJPC_SYNC);
                        VM_OBJECT_WUNLOCK(vp->v_object);
                } else
                        retv = TRUE;
                if (retv == TRUE) {
                        (void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
                        (void)nfsrpc_close(vp, 1, ap->a_td);
                }
        }

        np = VTONFS(vp);
        mtx_lock(&np->n_mtx);
        ncl_releasesillyrename(vp, ap->a_td);

        /*
         * NMODIFIED means that there might be dirty/stale buffers
         * associated with the NFS vnode.  None of the other flags are
         * meaningful after the vnode is unused.
         */
        np->n_flag &= NMODIFIED;
        mtx_unlock(&np->n_mtx);
        return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsdmap *dp, *dp2;

        /*
         * If the NLM is running, give it a chance to abort pending
         * locks.
         */
        if (nfs_reclaim_p != NULL)
                nfs_reclaim_p(ap);

        mtx_lock(&np->n_mtx);
        ncl_releasesillyrename(vp, ap->a_td);
        mtx_unlock(&np->n_mtx);

        /*
         * Destroy the vm object and flush associated pages.
         */
        vnode_destroy_vobject(vp);

        if (NFS_ISV4(vp) && vp->v_type == VREG)
                /*
                 * We can now safely close any remaining NFSv4 Opens for
                 * this file.  Most opens will have already been closed by
                 * ncl_inactive(), but there are cases where it is not
                 * called, so we need to do it again here.
                 */
                (void) nfsrpc_close(vp, 1, ap->a_td);

        vfs_hash_remove(vp);

        /*
         * Call nfscl_reclaimnode() to save attributes in the delegation,
         * as required.
         */
        if (vp->v_type == VREG)
                nfscl_reclaimnode(vp);

        /*
         * Free up any directory cookie structures and large file handle
         * structures that might be associated with this nfs node.
         */
        if (vp->v_type == VDIR) {
                dp = LIST_FIRST(&np->n_cookies);
                while (dp) {
                        dp2 = dp;
                        dp = LIST_NEXT(dp, ndm_list);
                        FREE((caddr_t)dp2, M_NFSDIROFF);
                }
        }
        if (np->n_writecred != NULL)
                crfree(np->n_writecred);
        FREE((caddr_t)np->n_fhp, M_NFSFH);
        if (np->n_v4 != NULL)
                FREE((caddr_t)np->n_v4, M_NFSV4NODE);
        mtx_destroy(&np->n_mtx);
        uma_zfree(newnfsnode_zone, vp->v_data);
        vp->v_data = NULL;
        return (0);
}

/*
 * Invalidate both the access and attribute caches for this vnode.
 */
void
ncl_invalcaches(struct vnode *vp)
{
        struct nfsnode *np = VTONFS(vp);
        int i;

        mtx_lock(&np->n_mtx);
        for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
                np->n_accesscache[i].stamp = 0;
        KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
        np->n_attrstamp = 0;
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
        mtx_unlock(&np->n_mtx);
}