/*	$NetBSD: puffs_node.c,v 1.23 2012/01/19 08:14:41 manu Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program, the Ulla Tuominen Foundation
 * and the Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_node.c,v 1.23 2012/01/19 08:14:41 manu Exp $");

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
};

static __inline struct puffs_node_hashlist
	*puffs_cookie2hashlist(struct puffs_mount *, puffs_cookie_t);
static struct puffs_node *puffs_cookie2pnode(struct puffs_mount *,
					     puffs_cookie_t);

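/* pool from which puffs_node structures are allocated and freed */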
struct pool puffs_pnpool;

/*
 * Grab a vnode and initialize all the puffs-dependent stuff.
 */
int
puffs_getvnode(struct mount *mp, puffs_cookie_t ck, enum vtype type,
	voff_t vsize, dev_t rdev, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	struct puffs_node *pnode;
	struct puffs_node_hashlist *plist;
	int error;

	pmp = MPTOPUFFSMP(mp);

	error = EPROTO;
	if (type <= VNON || type >= VBAD) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "bad node type", ck);
		goto bad;
	}
	if (vsize == VSIZENOTSET) {
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EINVAL,
		    "VSIZENOTSET is not a valid size", ck);
		goto bad;
	}

	error = getnewvnode(VT_PUFFS, mp, puffs_vnodeop_p, NULL, &vp);
	if (error) {
		goto bad;
	}
	vp->v_type = type;

	/*
	 * Creation should not fail after this point.  Or if it does,
	 * care must be taken so that VOP_INACTIVE() isn't called.
	 */

	/* default size */
	uvm_vnp_setsize(vp, 0);

	/* dances based on vnode type. almost ufs_vinit(), but not quite */
	switch (type) {
	case VCHR:
	case VBLK:
		/*
		 * replace the vnode operation vector with the specops
		 * vector.  our user server has very little control over
		 * the node if it decides it's a character or block
		 * special file
		 */
		vp->v_op = puffs_specop_p;
		spec_node_init(vp, rdev);
		break;

	case VFIFO:
		vp->v_op = puffs_fifoop_p;
		break;

	case VREG:
		uvm_vnp_setsize(vp, vsize);
		break;

	case VDIR:
	case VLNK:
	case VSOCK:
		break;
	default:
		panic("puffs_getvnode: invalid vtype %d", type);
	}

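	/* allocate a puffs node and zero it before filling in the fields */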
	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	memset(pnode, 0, sizeof(struct puffs_node));

	pnode->pn_cookie = ck;
	pnode->pn_refcount = 1;

	/* insert cookie on hash list, remove from the newcookie staging list */
	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	selinit(&pnode->pn_sel);
	plist = puffs_cookie2hashlist(pmp, ck);
	mutex_enter(&pmp->pmp_lock);
	LIST_INSERT_HEAD(plist, pnode, pn_hashent);
	if (ck != pmp->pmp_root_cookie) {
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == ck) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
	}
	mutex_init(&pnode->pn_sizemtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_exit(&pmp->pmp_lock);

	vp->v_data = pnode;
	vp->v_type = type;
	pnode->pn_vp = vp;
	pnode->pn_serversize = vsize;

	genfs_node_init(vp, &puffs_genfsops);
	*vpp = vp;

	DPRINTF(("new vnode at %p, pnode %p, cookie %p\n", vp,
	    pnode, pnode->pn_cookie));

	return 0;

 bad:
	/* remove staging cookie from list */
	if (ck != pmp->pmp_root_cookie) {
		mutex_enter(&pmp->pmp_lock);
		LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
			if (pnc->pnc_cookie == ck) {
				LIST_REMOVE(pnc, pnc_entries);
				kmem_free(pnc, sizeof(struct puffs_newcookie));
				break;
			}
		}
		KASSERT(pnc != NULL);
		mutex_exit(&pmp->pmp_lock);
	}

	return error;
}

/* create a new node for the node-creating vops (create, symlink, mkdir, mknod) */
int
puffs_newnode(struct mount *mp, struct vnode *dvp, struct vnode **vpp,
	puffs_cookie_t ck, struct componentname *cnp,
	enum vtype type, dev_t rdev)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int error;

	/* userspace probably has this as a NULL op */
	if (ck == NULL) {
		error = EOPNOTSUPP;
		return error;
	}

	/*
	 * Check for previous node with the same designation.
	 * Explicitly check the root node cookie, since it might be
	 * reclaimed from the kernel when this check is made.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (ck == pmp->pmp_root_cookie
	    || puffs_cookie2pnode(pmp, ck) != NULL) {
		mutex_exit(&pmp->pmp_lock);
		puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
		    "cookie exists", ck);
		return EPROTO;
	}

	LIST_FOREACH(pnc, &pmp->pmp_newcookie, pnc_entries) {
		if (pnc->pnc_cookie == ck) {
			mutex_exit(&pmp->pmp_lock);
			puffs_senderr(pmp, PUFFS_ERR_MAKENODE, EEXIST,
			    "newcookie exists", ck);
			return EPROTO;
		}
	}

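	/* a KM_SLEEP allocation must never be attempted from the pagedaemon */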
	KASSERT(curlwp != uvm.pagedaemon_lwp);
	pnc = kmem_alloc(sizeof(struct puffs_newcookie), KM_SLEEP);
	pnc->pnc_cookie = ck;
	LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
	mutex_exit(&pmp->pmp_lock);

	error = puffs_getvnode(dvp->v_mount, ck, type, 0, rdev, &vp);
	if (error)
		return error;

	vp->v_type = type;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = vp;

	if ((cnp->cn_flags & MAKEENTRY) && PUFFS_USE_NAMECACHE(pmp))
		cache_enter(dvp, vp, cnp);

	return 0;
}

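/*
 * Detach the puffs_node from a vnode: destroy the genfs node, drop
 * the pnode reference and clear v_data.
 */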
void
puffs_putvnode(struct vnode *vp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	pmp = VPTOPUFFSMP(vp);
	pnode = VPTOPP(vp);

#ifdef DIAGNOSTIC
	if (vp->v_tag != VT_PUFFS)
		panic("puffs_putvnode: %p not a puffs vnode", vp);
#endif

	genfs_node_destroy(vp);
	puffs_releasenode(pnode);
	vp->v_data = NULL;

	return;
}

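/* hash the cookie pointer value to select its bucket in the pnode hash table */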
static __inline struct puffs_node_hashlist *
puffs_cookie2hashlist(struct puffs_mount *pmp, puffs_cookie_t ck)
{
	uint32_t hash;

	hash = hash32_buf(&ck, sizeof(void *), HASH32_BUF_INIT);
	return &pmp->pmp_pnodehash[hash % pmp->pmp_npnodehash];
}

/*
 * Translate cookie to puffs_node.  Caller must hold pmp_lock
 * and it will be held upon return.
 */
static struct puffs_node *
puffs_cookie2pnode(struct puffs_mount *pmp, puffs_cookie_t ck)
{
	struct puffs_node_hashlist *plist;
	struct puffs_node *pnode;

	plist = puffs_cookie2hashlist(pmp, ck);
	LIST_FOREACH(pnode, plist, pn_hashent) {
		if (pnode->pn_cookie == ck)
			break;
	}

	return pnode;
}

/*
 * Make sure root vnode exists and reference it.  Does NOT lock.
 */
static int
puffs_makeroot(struct puffs_mount *pmp)
{
	struct vnode *vp;
	int rv;

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode.  the latter is controlled by puffs_inactive().
	 *
	 * pmp_root is set here and cleared in puffs_reclaim().
	 */
 retry:
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
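		/* grab a reference; ENOENT means the vnode was reclaimed, so retry */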
		mutex_enter(vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		switch (vget(vp, 0)) {
		case ENOENT:
			goto retry;
		case 0:
			return 0;
		default:
			break;
		}
	} else
		mutex_exit(&pmp->pmp_lock);

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if ((rv = puffs_getvnode(pmp->pmp_mp, pmp->pmp_root_cookie,
	    pmp->pmp_root_vtype, pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp)))
		return rv;

	/*
	 * Someone magically managed to race us into puffs_getvnode?
	 * Put our previous new vnode back and retry.
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_root) {
		struct puffs_node *pnode = vp->v_data;

		LIST_REMOVE(pnode, pn_hashent);
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		goto retry;
	}

	/* store cache */
	vp->v_vflag |= VV_ROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	return 0;
}

/*
 * Locate the in-kernel vnode based on the cookie received from
 * userspace.  If found, the vnode is returned in *vpp and 0 is
 * returned; otherwise PUFFS_NOSUCHCOOKIE or an error from vget()
 * is returned.  The parameter "lock" controls whether to lock the
 * vnode or not.  Always locking might cause us to lock against
 * ourselves in situations where we want the vnode but don't care
 * for the vnode lock, e.g. a file server issued putpages.
 */
int
puffs_cookie2vnode(struct puffs_mount *pmp, puffs_cookie_t ck, int lock,
	int willcreate, struct vnode **vpp)
{
	struct puffs_node *pnode;
	struct puffs_newcookie *pnc;
	struct vnode *vp;
	int vgetflags, rv;

	/*
	 * Handle root in a special manner, since we want to make sure
	 * pmp_root is properly set.
	 */
	if (ck == pmp->pmp_root_cookie) {
		if ((rv = puffs_makeroot(pmp)))
			return rv;
		if (lock)
			vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

		*vpp = pmp->pmp_root;
		return 0;
	}

 retry:
	mutex_enter(&pmp->pmp_lock);
	pnode = puffs_cookie2pnode(pmp, ck);
	if (pnode == NULL) {
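		/* stage the cookie on the newcookie list for the node being created */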
		if (willcreate) {
			pnc = kmem_alloc(sizeof(struct puffs_newcookie),
			    KM_SLEEP);
			pnc->pnc_cookie = ck;
			LIST_INSERT_HEAD(&pmp->pmp_newcookie, pnc, pnc_entries);
		}
		mutex_exit(&pmp->pmp_lock);
		return PUFFS_NOSUCHCOOKIE;
	}
	vp = pnode->pn_vp;
	mutex_enter(vp->v_interlock);
	mutex_exit(&pmp->pmp_lock);

	vgetflags = 0;
	if (lock)
		vgetflags |= LK_EXCLUSIVE;
	switch (rv = vget(vp, vgetflags)) {
	case ENOENT:
		goto retry;
	case 0:
		break;
	default:
		return rv;
	}

	*vpp = vp;
	return 0;
}

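/*
 * Cache metadata updates (timestamps, size) in the puffs_node for
 * later flushing to the file server.
 */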
void
puffs_updatenode(struct puffs_node *pn, int flags, voff_t size)
{
	struct timespec ts;

	if (flags == 0)
		return;

	nanotime(&ts);

	if (flags & PUFFS_UPDATEATIME) {
		pn->pn_mc_atime = ts;
		pn->pn_stat |= PNODE_METACACHE_ATIME;
	}
	if (flags & PUFFS_UPDATECTIME) {
		pn->pn_mc_ctime = ts;
		pn->pn_stat |= PNODE_METACACHE_CTIME;
	}
	if (flags & PUFFS_UPDATEMTIME) {
		pn->pn_mc_mtime = ts;
		pn->pn_stat |= PNODE_METACACHE_MTIME;
	}
	if (flags & PUFFS_UPDATESIZE) {
		pn->pn_mc_size = size;
		pn->pn_stat |= PNODE_METACACHE_SIZE;
	}
}

/*
 * Add a reference to the node.
 * pn_mtx must be held on entry and is still held on return.
 */
void
puffs_referencenode(struct puffs_node *pn)
{

	KASSERT(mutex_owned(&pn->pn_mtx));
	pn->pn_refcount++;
}

/*
 * Release the pnode structure.  This deals with references to the
 * puffs_node instead of the vnode.  We can't use vref()/vrele() on
 * the vnode here, since that causes VOP_INACTIVE(), which in turn
 * causes a deadlock when called by the one who is supposed to
 * handle it.
 */
void
puffs_releasenode(struct puffs_node *pn)
{

	mutex_enter(&pn->pn_mtx);
	if (--pn->pn_refcount == 0) {
		mutex_exit(&pn->pn_mtx);
		mutex_destroy(&pn->pn_mtx);
		mutex_destroy(&pn->pn_sizemtx);
		seldestroy(&pn->pn_sel);
		pool_put(&puffs_pnpool, pn);
	} else {
		mutex_exit(&pn->pn_mtx);
	}
}
491