xref: /netbsd-src/sys/coda/coda_vnops.c (revision 7788a0781fe6ff2cce37368b4578a7ade0850cb1)
1 /*	$NetBSD: coda_vnops.c,v 1.90 2012/08/02 16:06:58 christos Exp $	*/
2 
3 /*
4  *
5  *             Coda: an Experimental Distributed File System
6  *                              Release 3.1
7  *
8  *           Copyright (c) 1987-1998 Carnegie Mellon University
9  *                          All Rights Reserved
10  *
11  * Permission  to  use, copy, modify and distribute this software and its
12  * documentation is hereby granted,  provided  that  both  the  copyright
13  * notice  and  this  permission  notice  appear  in  all  copies  of the
14  * software, derivative works or  modified  versions,  and  any  portions
15  * thereof, and that both notices appear in supporting documentation, and
16  * that credit is given to Carnegie Mellon University  in  all  documents
17  * and publicity pertaining to direct or indirect use of this code or its
18  * derivatives.
19  *
20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
25  * ANY DERIVATIVE WORK.
26  *
27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
29  * Mellon the rights to redistribute these changes without encumbrance.
30  *
31  * 	@(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32  */
33 
34 /*
35  * Mach Operating System
36  * Copyright (c) 1990 Carnegie-Mellon University
37  * Copyright (c) 1989 Carnegie-Mellon University
38  * All rights reserved.  The CMU software License Agreement specifies
39  * the terms and conditions for use and redistribution.
40  */
41 
42 /*
43  * This code was written for the Coda file system at Carnegie Mellon
44  * University.  Contributors include David Steere, James Kistler, and
45  * M. Satyanarayanan.
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.90 2012/08/02 16:06:58 christos Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65 
66 #include <miscfs/genfs/genfs.h>
67 
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76 
77 /*
78  * These flags select various performance enhancements.
79  */
80 int coda_attr_cache  = 1;       /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1;     /* Set to cache symbolic link information */
82 int coda_access_cache = 1;      /* Set to handle some access checks directly */
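/*
 * These defaults are consulted below: coda_attr_cache in coda_getattr,
 * coda_create and coda_mkdir; coda_symlink_cache in coda_readlink and
 * coda_inactive; coda_access_cache in coda_access.
 */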
83 
84 /* structure to keep track of vfs calls */
85 
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87 
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
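/*
 * As used in this file: entries counts calls into each operation,
 * sat_intrn counts requests satisfied without a Venus upcall (e.g.
 * cache hits), unsat_intrn counts internal failures, and gen_intrn
 * counts internally generated operations (e.g. the "internal open"
 * done by coda_rdwr and coda_readdir).
 */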
92 
93 /* Set nonzero to log vnode lock/unlock attempts (see coda_lock/coda_unlock). */
94 static int coda_lockdebug = 0;
95 
96 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
97 
98 /* Definition of the vnode operation vector */
99 
100 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
101     { &vop_default_desc, coda_vop_error },
102     { &vop_lookup_desc, coda_lookup },		/* lookup */
103     { &vop_create_desc, coda_create },		/* create */
104     { &vop_mknod_desc, coda_vop_error },	/* mknod */
105     { &vop_open_desc, coda_open },		/* open */
106     { &vop_close_desc, coda_close },		/* close */
107     { &vop_access_desc, coda_access },		/* access */
108     { &vop_getattr_desc, coda_getattr },	/* getattr */
109     { &vop_setattr_desc, coda_setattr },	/* setattr */
110     { &vop_read_desc, coda_read },		/* read */
111     { &vop_write_desc, coda_write },		/* write */
112     { &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
113     { &vop_ioctl_desc, coda_ioctl },		/* ioctl */
114     { &vop_mmap_desc, genfs_mmap },		/* mmap */
115     { &vop_fsync_desc, coda_fsync },		/* fsync */
116     { &vop_remove_desc, coda_remove },		/* remove */
117     { &vop_link_desc, coda_link },		/* link */
118     { &vop_rename_desc, coda_rename },		/* rename */
119     { &vop_mkdir_desc, coda_mkdir },		/* mkdir */
120     { &vop_rmdir_desc, coda_rmdir },		/* rmdir */
121     { &vop_symlink_desc, coda_symlink },	/* symlink */
122     { &vop_readdir_desc, coda_readdir },	/* readdir */
123     { &vop_readlink_desc, coda_readlink },	/* readlink */
124     { &vop_abortop_desc, coda_abortop },	/* abortop */
125     { &vop_inactive_desc, coda_inactive },	/* inactive */
126     { &vop_reclaim_desc, coda_reclaim },	/* reclaim */
127     { &vop_lock_desc, coda_lock },		/* lock */
128     { &vop_unlock_desc, coda_unlock },		/* unlock */
129     { &vop_bmap_desc, coda_bmap },		/* bmap */
130     { &vop_strategy_desc, coda_strategy },	/* strategy */
131     { &vop_print_desc, coda_vop_error },	/* print */
132     { &vop_islocked_desc, coda_islocked },	/* islocked */
133     { &vop_pathconf_desc, coda_vop_error },	/* pathconf */
134     { &vop_advlock_desc, coda_vop_nop },	/* advlock */
135     { &vop_bwrite_desc, coda_vop_error },	/* bwrite */
136     { &vop_seek_desc, genfs_seek },		/* seek */
137     { &vop_poll_desc, genfs_poll },		/* poll */
138     { &vop_getpages_desc, coda_getpages },	/* getpages */
139     { &vop_putpages_desc, coda_putpages },	/* putpages */
140     { NULL, NULL }
141 };
142 
143 static void coda_print_vattr(struct vattr *);
144 
145 int (**coda_vnodeop_p)(void *);
146 const struct vnodeopv_desc coda_vnodeop_opv_desc =
147         { &coda_vnodeop_p, coda_vnodeop_entries };
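/*
 * A minimal sketch, not taken from this file, of how such a descriptor
 * is normally wired up: the mount code lists it in an opv_descs array
 * referenced from its struct vfsops so that vfs_attach() can fill in
 * coda_vnodeop_p at initialization time.
 *
 *	const struct vnodeopv_desc * const coda_vnodeopv_descs[] = {
 *		&coda_vnodeop_opv_desc,
 *		NULL,
 *	};
 */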
148 
149 /* Definitions of NetBSD vnodeop interfaces */
150 
151 /*
152  * A generic error routine.  Return EIO without looking at arguments.
153  */
154 int
155 coda_vop_error(void *anon) {
156     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157 
158     if (codadebug) {
159 	myprintf(("%s: Vnode operation %s called (error).\n",
160 	    __func__, (*desc)->vdesc_name));
161     }
162 
163     return EIO;
164 }
165 
166 /* A generic do-nothing. */
167 int
168 coda_vop_nop(void *anon) {
169     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170 
171     if (codadebug) {
172 	myprintf(("Vnode operation %s called, but unsupported\n",
173 		  (*desc)->vdesc_name));
174     }
175    return (0);
176 }
177 
178 int
179 coda_vnodeopstats_init(void)
180 {
181 	int i;
182 
183 	for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
184 		coda_vnodeopstats[i].opcode = i;
185 		coda_vnodeopstats[i].entries = 0;
186 		coda_vnodeopstats[i].sat_intrn = 0;
187 		coda_vnodeopstats[i].unsat_intrn = 0;
188 		coda_vnodeopstats[i].gen_intrn = 0;
189 	}
190 
191 	return 0;
192 }
193 
194 /*
195  * XXX The entire relationship between VOP_OPEN and having a container
196  * file (via venus_open) needs to be reexamined.  In particular, it's
197  * valid to open/mmap/close and then reference.  Instead of doing
198  * VOP_OPEN when getpages needs a container, we should do the
199  * venus_open part, and record that the vnode has opened the container
200  * for getpages, and do the matching logical close on coda_inactive.
201  * Further, coda_rdwr needs a container file, and sometimes needs to
202  * do the equivalent of open (core dumps).
203  */
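/*
 * A rough, purely illustrative sketch of that idea (the flag and the
 * helper implied here do not exist): coda_getpages would perform only
 * the venus_open/coda_grab_vnode step and set, say, a C_PAGES_OPEN bit
 * in c_flags; coda_inactive would then issue the matching logical
 * close when that bit is set, instead of going through VOP_OPEN.
 */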
204 /*
205  * coda_open calls Venus to return the device and inode of the
206  * container file, and then obtains a vnode for that file.  The
207  * container vnode is stored in the coda vnode, and a reference is
208  * added for each open file.
209  */
210 int
211 coda_open(void *v)
212 {
213     /*
214      * NetBSD can pass the O_EXCL flag in mode, even though the check
215      * has already happened.  Venus defensively assumes that if open
216      * is passed O_EXCL, it must be a bug.  We strip the flag here.
217      */
218 /* true args */
219     struct vop_open_args *ap = v;
220     vnode_t *vp = ap->a_vp;
221     struct cnode *cp = VTOC(vp);
222     int flag = ap->a_mode & (~O_EXCL);
223     kauth_cred_t cred = ap->a_cred;
224 /* locals */
225     int error;
226     dev_t dev;			/* container file device, inode, vnode */
227     ino_t inode;
228     vnode_t *container_vp;
229 
230     MARK_ENTRY(CODA_OPEN_STATS);
231 
232     if (!VOP_ISLOCKED(vp))
233 	VOP_LOCK(vp, LK_EXCLUSIVE);
234     /* Check for open of control file. */
235     if (IS_CTL_VP(vp)) {
236 	/* if (WRITABLE(flag)) */
237 	if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
238 	    MARK_INT_FAIL(CODA_OPEN_STATS);
239 	    return(EACCES);
240 	}
241 	MARK_INT_SAT(CODA_OPEN_STATS);
242 	return(0);
243     }
244 
245     error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
246     if (error)
247 	return (error);
248     if (!error) {
249 	    CODADEBUG(CODA_OPEN, myprintf((
250 		"%s: dev 0x%llx inode %llu result %d\n", __func__,
251 		(unsigned long long)dev, (unsigned long long)inode, error));)
252     }
253 
254     /*
255      * Obtain locked and referenced container vnode from container
256      * device/inode.
257      */
258     error = coda_grab_vnode(vp, dev, inode, &container_vp);
259     if (error)
260 	return (error);
261 
262     /* Save the vnode pointer for the container file. */
263     if (cp->c_ovp == NULL) {
264 	cp->c_ovp = container_vp;
265     } else {
266 	if (cp->c_ovp != container_vp)
267 	    /*
268 	     * Perhaps venus returned a different container, or
269 	     * something else went wrong.
270 	     */
271 	    panic("%s: cp->c_ovp != container_vp", __func__);
272     }
273     cp->c_ocount++;
274 
275     /* Flush the attribute cache if writing the file. */
276     if (flag & FWRITE) {
277 	cp->c_owrite++;
278 	cp->c_flags &= ~C_VATTR;
279     }
280 
281     /*
282      * Save the <device, inode> pair for the container file to speed
283      * up subsequent reads while closed (mmap, program execution).
284      * This is perhaps safe because venus will invalidate the node
285      * before changing the container file mapping.
286      */
287     cp->c_device = dev;
288     cp->c_inode = inode;
289 
290     /* Open the container file. */
291     error = VOP_OPEN(container_vp, flag, cred);
292     /*
293      * Drop the lock on the container, after we have done VOP_OPEN
294      * (which requires a locked vnode).
295      */
296     VOP_UNLOCK(container_vp);
297     return(error);
298 }
299 
300 /*
301  * Close the cache file used for I/O and notify Venus.
302  */
303 int
304 coda_close(void *v)
305 {
306 /* true args */
307     struct vop_close_args *ap = v;
308     vnode_t *vp = ap->a_vp;
309     struct cnode *cp = VTOC(vp);
310     int flag = ap->a_fflag;
311     kauth_cred_t cred = ap->a_cred;
312 /* locals */
313     int error;
314 
315     MARK_ENTRY(CODA_CLOSE_STATS);
316 
317     /* Check for close of control file. */
318     if (IS_CTL_VP(vp)) {
319 	MARK_INT_SAT(CODA_CLOSE_STATS);
320 	return(0);
321     }
322 
323     /*
324      * XXX The IS_UNMOUNTING part of this is very suspect.
325      */
326     if (IS_UNMOUNTING(cp)) {
327 	if (cp->c_ovp) {
328 #ifdef	CODA_VERBOSE
329 	    printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
330 		__func__, vp->v_usecount, cp->c_ovp, vp, cp);
331 #endif
332 #ifdef	hmm
333 	    vgone(cp->c_ovp);
334 #else
335 	    vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
336 	    VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
337 	    vput(cp->c_ovp);
338 #endif
339 	} else {
340 #ifdef	CODA_VERBOSE
341 	    printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
342 #endif
343 	}
344 	return ENODEV;
345     }
346 
347     /* Lock the container node, and VOP_CLOSE it. */
348     vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
349     VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
350     /*
351      * Drop the lock we just obtained, and vrele the container vnode.
352      * Decrement reference counts, and clear container vnode pointer on
353      * last close.
354      */
355     vput(cp->c_ovp);
356     if (flag & FWRITE)
357 	--cp->c_owrite;
358     if (--cp->c_ocount == 0)
359 	cp->c_ovp = NULL;
360 
361     error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
362 
363     CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
364     return(error);
365 }
366 
367 int
368 coda_read(void *v)
369 {
370     struct vop_read_args *ap = v;
371 
372     ENTRY;
373     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
374 		    ap->a_ioflag, ap->a_cred, curlwp));
375 }
376 
377 int
378 coda_write(void *v)
379 {
380     struct vop_write_args *ap = v;
381 
382     ENTRY;
383     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
384 		    ap->a_ioflag, ap->a_cred, curlwp));
385 }
386 
387 int
388 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
389 	kauth_cred_t cred, struct lwp *l)
390 {
391 /* upcall decl */
392   /* NOTE: container file operation!!! */
393 /* locals */
394     struct cnode *cp = VTOC(vp);
395     vnode_t *cfvp = cp->c_ovp;
396     struct proc *p = l->l_proc;
397     int opened_internally = 0;
398     int error = 0;
399 
400     MARK_ENTRY(CODA_RDWR_STATS);
401 
402     CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
403 	uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
404 	(long long) uiop->uio_offset)); )
405 
406     /* Check for rdwr of control object. */
407     if (IS_CTL_VP(vp)) {
408 	MARK_INT_FAIL(CODA_RDWR_STATS);
409 	return(EINVAL);
410     }
411 
412     /* Redirect the request to UFS. */
413 
414     /*
415      * If file is not already open this must be a page
416      * {read,write} request.  Iget the cache file's inode
417      * pointer if we still have its <device, inode> pair.
418      * Otherwise, we must do an internal open to derive the
419      * pair.
420      * XXX Integrate this into a coherent strategy for container
421      * file acquisition.
422      */
423     if (cfvp == NULL) {
424 	/*
425 	 * If we're dumping core, do the internal open. Otherwise
426 	 * venus won't have the correct size of the core when
427 	 * it's completely written.
428 	 */
429 	if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
430 #ifdef CODA_VERBOSE
431 	    printf("%s: grabbing container vnode, losing reference\n",
432 		__func__);
433 #endif
434 	    /* Get locked and refed vnode. */
435 	    error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
436 	    if (error) {
437 		MARK_INT_FAIL(CODA_RDWR_STATS);
438 		return(error);
439 	    }
440 	    /*
441 	     * Drop lock.
442 	     * XXX Where is the reference released?
443 	     */
444 	    VOP_UNLOCK(cfvp);
445 	}
446 	else {
447 #ifdef CODA_VERBOSE
448 	    printf("%s: internal VOP_OPEN\n", __func__);
449 #endif
450 	    opened_internally = 1;
451 	    MARK_INT_GEN(CODA_OPEN_STATS);
452 	    error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
453 #ifdef	CODA_VERBOSE
454 	    printf("%s: Internally Opening %p\n", __func__, vp);
455 #endif
456 	    if (error) {
457 		MARK_INT_FAIL(CODA_RDWR_STATS);
458 		return(error);
459 	    }
460 	    cfvp = cp->c_ovp;
461 	}
462     }
463 
464     /* Have UFS handle the call. */
465     CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
466 	coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
467 
468     if (rw == UIO_READ) {
469 	error = VOP_READ(cfvp, uiop, ioflag, cred);
470     } else {
471 	error = VOP_WRITE(cfvp, uiop, ioflag, cred);
472     }
473 
474     if (error)
475 	MARK_INT_FAIL(CODA_RDWR_STATS);
476     else
477 	MARK_INT_SAT(CODA_RDWR_STATS);
478 
479     /* Do an internal close if necessary. */
480     if (opened_internally) {
481 	MARK_INT_GEN(CODA_CLOSE_STATS);
482 	(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
483     }
484 
485     /* Invalidate cached attributes if writing. */
486     if (rw == UIO_WRITE)
487 	cp->c_flags &= ~C_VATTR;
488     return(error);
489 }
490 
491 int
492 coda_ioctl(void *v)
493 {
494 /* true args */
495     struct vop_ioctl_args *ap = v;
496     vnode_t *vp = ap->a_vp;
497     int com = ap->a_command;
498     void *data = ap->a_data;
499     int flag = ap->a_fflag;
500     kauth_cred_t cred = ap->a_cred;
501 /* locals */
502     int error;
503     vnode_t *tvp;
504     struct PioctlData *iap = (struct PioctlData *)data;
505     namei_simple_flags_t sflags;
506 
507     MARK_ENTRY(CODA_IOCTL_STATS);
508 
509     CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
510 
511     /* Don't check for operation on a dying object; for the ctlvp it
512        shouldn't matter. */
513 
514     /* Must be control object to succeed. */
515     if (!IS_CTL_VP(vp)) {
516 	MARK_INT_FAIL(CODA_IOCTL_STATS);
517 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
518 	return (EOPNOTSUPP);
519     }
520     /* Look up the pathname. */
521 
522     /* Should we use the name cache here? It would get it from
523        lookupname sooner or later anyway, right? */
524 
525     sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
526     error = namei_simple_user(iap->path, sflags, &tvp);
527 
528     if (error) {
529 	MARK_INT_FAIL(CODA_IOCTL_STATS);
530 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
531 	    __func__, error));)
532 	return(error);
533     }
534 
535     /*
536      * Make sure this is a coda style cnode, but it may be a
537      * different vfsp
538      */
539     /* XXX: this totally violates the comment about vtagtype in vnode.h */
540     if (tvp->v_tag != VT_CODA) {
541 	vrele(tvp);
542 	MARK_INT_FAIL(CODA_IOCTL_STATS);
543 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
544 	    __func__, iap->path));)
545 	return(EINVAL);
546     }
547 
548     if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
549 	vrele(tvp);
550 	return(EINVAL);
551     }
552     error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
553 	cred, curlwp);
554 
555     if (error)
556 	MARK_INT_FAIL(CODA_IOCTL_STATS);
557     else
558 	CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
559 
560     vrele(tvp);
561     return(error);
562 }
563 
564 /*
565  * To reduce the cost of a user-level venus, we cache attributes in
566  * the kernel.  Each cnode has storage allocated for an attribute. If
567  * c_vattr is valid, return a reference to it. Otherwise, get the
568  * attributes from venus and store them in the cnode.  There is some
569  * question whether this method is a security leak. But I think that in
570  * order to make this call, the user must have done a lookup and
571  * opened the file, and therefore should already have access.
572  */
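/*
 * The cached copy is invalidated (C_VATTR cleared) elsewhere in this
 * file whenever the attributes may have changed: on write access to
 * the file (coda_open with FWRITE, coda_rdwr with UIO_WRITE), on
 * coda_setattr, and on the parent directory for namespace operations
 * (create, remove, link, rename, mkdir, rmdir, symlink).
 */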
573 int
574 coda_getattr(void *v)
575 {
576 /* true args */
577     struct vop_getattr_args *ap = v;
578     vnode_t *vp = ap->a_vp;
579     struct cnode *cp = VTOC(vp);
580     struct vattr *vap = ap->a_vap;
581     kauth_cred_t cred = ap->a_cred;
582 /* locals */
583     int error;
584 
585     MARK_ENTRY(CODA_GETATTR_STATS);
586 
587     /* Check for getattr of control object. */
588     if (IS_CTL_VP(vp)) {
589 	MARK_INT_FAIL(CODA_GETATTR_STATS);
590 	return(ENOENT);
591     }
592 
593     /* Check to see if the attributes have already been cached */
594     if (VALID_VATTR(cp)) {
595 	CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
596 	    __func__, coda_f2s(&cp->c_fid)));})
597 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
598 	    coda_print_vattr(&cp->c_vattr); )
599 
600 	*vap = cp->c_vattr;
601 	MARK_INT_SAT(CODA_GETATTR_STATS);
602 	return(0);
603     }
604 
605     error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
606 
607     if (!error) {
608 	CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
609 	    __func__, coda_f2s(&cp->c_fid), error)); )
610 
611 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
612 	    coda_print_vattr(vap);	)
613 
614 	/* If not open for write, store attributes in cnode */
615 	if ((cp->c_owrite == 0) && (coda_attr_cache)) {
616 	    cp->c_vattr = *vap;
617 	    cp->c_flags |= C_VATTR;
618 	}
619 
620     }
621     return(error);
622 }
623 
624 int
625 coda_setattr(void *v)
626 {
627 /* true args */
628     struct vop_setattr_args *ap = v;
629     vnode_t *vp = ap->a_vp;
630     struct cnode *cp = VTOC(vp);
631     struct vattr *vap = ap->a_vap;
632     kauth_cred_t cred = ap->a_cred;
633 /* locals */
634     int error;
635 
636     MARK_ENTRY(CODA_SETATTR_STATS);
637 
638     /* Check for setattr of control object. */
639     if (IS_CTL_VP(vp)) {
640 	MARK_INT_FAIL(CODA_SETATTR_STATS);
641 	return(ENOENT);
642     }
643 
644     if (codadebug & CODADBGMSK(CODA_SETATTR)) {
645 	coda_print_vattr(vap);
646     }
647     error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
648 
649     if (!error)
650 	cp->c_flags &= ~C_VATTR;
651 
652     CODADEBUG(CODA_SETATTR,	myprintf(("setattr %d\n", error)); )
653     return(error);
654 }
655 
656 int
657 coda_access(void *v)
658 {
659 /* true args */
660     struct vop_access_args *ap = v;
661     vnode_t *vp = ap->a_vp;
662     struct cnode *cp = VTOC(vp);
663     int mode = ap->a_mode;
664     kauth_cred_t cred = ap->a_cred;
665 /* locals */
666     int error;
667 
668     MARK_ENTRY(CODA_ACCESS_STATS);
669 
670     /* Check for access of control object.  Only read access is
671        allowed on it. */
672     if (IS_CTL_VP(vp)) {
673 	/* bogus hack - all will be marked as successes */
674 	MARK_INT_SAT(CODA_ACCESS_STATS);
675 	return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
676 	       ? 0 : EACCES);
677     }
678 
679     /*
680      * If the file is a directory, and we are checking exec (e.g. lookup)
681      * access, and the file is in the namecache, then the user must have
682      * lookup access to it.
683      */
684     if (coda_access_cache) {
685 	if ((vp->v_type == VDIR) && (mode & VEXEC)) {
686 	    if (coda_nc_lookup(cp, ".", 1, cred)) {
687 		MARK_INT_SAT(CODA_ACCESS_STATS);
688 		return(0);                     /* it was in the cache */
689 	    }
690 	}
691     }
692 
693     error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
694 
695     return(error);
696 }
697 
698 /*
699  * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
700  * done. If a buffer has been saved in anticipation of a coda_create or
701  * a coda_remove, delete it.
702  */
703 /* ARGSUSED */
704 int
705 coda_abortop(void *v)
706 {
707 /* true args */
708     struct vop_abortop_args /* {
709 	vnode_t *a_dvp;
710 	struct componentname *a_cnp;
711     } */ *ap = v;
712 
713     (void)ap;
714 /* upcall decl */
715 /* locals */
716 
717     return (0);
718 }
719 
720 int
721 coda_readlink(void *v)
722 {
723 /* true args */
724     struct vop_readlink_args *ap = v;
725     vnode_t *vp = ap->a_vp;
726     struct cnode *cp = VTOC(vp);
727     struct uio *uiop = ap->a_uio;
728     kauth_cred_t cred = ap->a_cred;
729 /* locals */
730     struct lwp *l = curlwp;
731     int error;
732     char *str;
733     int len;
734 
735     MARK_ENTRY(CODA_READLINK_STATS);
736 
737     /* Check for readlink of control object. */
738     if (IS_CTL_VP(vp)) {
739 	MARK_INT_FAIL(CODA_READLINK_STATS);
740 	return(ENOENT);
741     }
742 
743     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
744 	uiop->uio_rw = UIO_READ;
745 	error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
746 	if (error)
747 	    MARK_INT_FAIL(CODA_READLINK_STATS);
748 	else
749 	    MARK_INT_SAT(CODA_READLINK_STATS);
750 	return(error);
751     }
752 
753     error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
754 
755     if (!error) {
756 	uiop->uio_rw = UIO_READ;
757 	error = uiomove(str, len, uiop);
758 
759 	if (coda_symlink_cache) {
760 	    cp->c_symlink = str;
761 	    cp->c_symlen = len;
762 	    cp->c_flags |= C_SYMLINK;
763 	} else
764 	    CODA_FREE(str, len);
765     }
766 
767     CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
768     return(error);
769 }
770 
771 int
772 coda_fsync(void *v)
773 {
774 /* true args */
775     struct vop_fsync_args *ap = v;
776     vnode_t *vp = ap->a_vp;
777     struct cnode *cp = VTOC(vp);
778     kauth_cred_t cred = ap->a_cred;
779 /* locals */
780     vnode_t *convp = cp->c_ovp;
781     int error;
782 
783     MARK_ENTRY(CODA_FSYNC_STATS);
784 
785     /* Check for fsync on an unmounting object */
786     /* The NetBSD kernel, in its infinite wisdom, can try to fsync
787      * after an unmount has been initiated.  This is a Bad Thing,
788      * which we have to avoid.  Not a legitimate failure for stats.
789      */
790     if (IS_UNMOUNTING(cp)) {
791 	return(ENODEV);
792     }
793 
794     /* Check for fsync of control object. */
795     if (IS_CTL_VP(vp)) {
796 	MARK_INT_SAT(CODA_FSYNC_STATS);
797 	return(0);
798     }
799 
800     if (convp)
801     	VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
802 
803     /*
804      * We can expect fsync on any vnode at all if venus is purging it.
805      * Venus can't very well answer the fsync request, now can it?
806      * Hopefully, it won't have to, because hopefully, venus preserves
807      * the (possibly untrue) invariant that it never purges an open
808      * vnode.  Hopefully.
809      */
810     if (cp->c_flags & C_PURGING) {
811 	return(0);
812     }
813 
814     error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
815 
816     CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
817     return(error);
818 }
819 
820 /*
821  * vp is locked on entry, and we must unlock it.
822  * XXX This routine is suspect and probably needs rewriting.
823  */
824 int
825 coda_inactive(void *v)
826 {
827 /* true args */
828     struct vop_inactive_args *ap = v;
829     vnode_t *vp = ap->a_vp;
830     struct cnode *cp = VTOC(vp);
831     kauth_cred_t cred __unused = NULL;
832 
833     /* We don't need to send inactive to venus - DCS */
834     MARK_ENTRY(CODA_INACTIVE_STATS);
835 
836     if (IS_CTL_VP(vp)) {
837 	MARK_INT_SAT(CODA_INACTIVE_STATS);
838 	return 0;
839     }
840 
841     CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
842 				  coda_f2s(&cp->c_fid), vp->v_mount));)
843 
844     /* If an array has been allocated to hold the symlink, deallocate it */
845     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
846 	if (cp->c_symlink == NULL)
847 	    panic("%s: null symlink pointer in cnode", __func__);
848 
849 	CODA_FREE(cp->c_symlink, cp->c_symlen);
850 	cp->c_flags &= ~C_SYMLINK;
851 	cp->c_symlen = 0;
852     }
853 
854     /* Remove it from the table so it can't be found. */
855     coda_unsave(cp);
856     if (vp->v_mount->mnt_data == NULL) {
857 	myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
858 	panic("badness in coda_inactive");
859     }
860 
861 #ifdef CODA_VERBOSE
862     /* Sanity checks that perhaps should be panic. */
863     if (vp->v_usecount > 1)
864 	printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
865     if (cp->c_ovp != NULL)
866 	printf("%s: %p ovp != NULL\n", __func__, vp);
867 #endif
868     /* XXX Do we need to VOP_CLOSE container vnodes? */
869     VOP_UNLOCK(vp);
870     if (!IS_UNMOUNTING(cp))
871 	*ap->a_recycle = true;
872 
873     MARK_INT_SAT(CODA_INACTIVE_STATS);
874     return(0);
875 }
876 
877 /*
878  * Coda does not use the normal namecache, but a private version.
879  * Consider how to use the standard facility instead.
880  */
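/*
 * A hypothetical outline only: using the standard facility would
 * roughly mean calling cache_lookup() here before the Venus upcall,
 * cache_enter() where coda_nc_enter() is called today, and
 * cache_purge() at the existing coda_nc_zapfile()/coda_nc_zapParentfid()
 * invalidation points.  The exact namecache interfaces differ between
 * NetBSD versions, so this is a sketch, not a drop-in plan.
 */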
881 int
882 coda_lookup(void *v)
883 {
884 /* true args */
885     struct vop_lookup_args *ap = v;
886     /* (locked) vnode of dir in which to do lookup */
887     vnode_t *dvp = ap->a_dvp;
888     struct cnode *dcp = VTOC(dvp);
889     /* output variable for result */
890     vnode_t **vpp = ap->a_vpp;
891     /* name to lookup */
892     struct componentname *cnp = ap->a_cnp;
893     kauth_cred_t cred = cnp->cn_cred;
894     struct lwp *l = curlwp;
895 /* locals */
896     struct cnode *cp;
897     const char *nm = cnp->cn_nameptr;
898     int len = cnp->cn_namelen;
899     int flags = cnp->cn_flags;
900     int isdot;
901     CodaFid VFid;
902     int	vtype;
903     int error = 0;
904 
905     MARK_ENTRY(CODA_LOOKUP_STATS);
906 
907     CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
908 	nm, coda_f2s(&dcp->c_fid)));)
909 
910     /*
911      * XXX componentname flags in MODMASK are not handled at all
912      */
913 
914     /*
915      * The overall strategy is to switch on the lookup type and get a
916      * result vnode that is vref'd but not locked.  Then, the code at
917      * exit: switches on ., .., and regular lookups and does the right
918      * locking.
919      */
920 
921     /* Check for lookup of control object. */
922     if (IS_CTL_NAME(dvp, nm, len)) {
923 	*vpp = coda_ctlvp;
924 	vref(*vpp);
925 	MARK_INT_SAT(CODA_LOOKUP_STATS);
926 	goto exit;
927     }
928 
929     /* Avoid trying to hand venus an unreasonably long name. */
930     if (len+1 > CODA_MAXNAMLEN) {
931 	MARK_INT_FAIL(CODA_LOOKUP_STATS);
932 	CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
933 	    __func__, coda_f2s(&dcp->c_fid), nm));)
934 	*vpp = (vnode_t *)0;
935 	error = EINVAL;
936 	goto exit;
937     }
938 
939     /*
940      * XXX Check for DOT lookups, and short circuit all the caches,
941      * just doing an extra vref.  (venus guarantees that lookup of
942      * . returns self.)
943      */
944     isdot = (len == 1 && nm[0] == '.');
945 
946     /*
947      * Try to resolve the lookup in the minicache.  If that fails, ask
948      * venus to do the lookup.  XXX The interaction between vnode
949      * locking and any locking that coda does is not clear.
950      */
951     cp = coda_nc_lookup(dcp, nm, len, cred);
952     if (cp) {
953 	*vpp = CTOV(cp);
954 	vref(*vpp);
955 	CODADEBUG(CODA_LOOKUP,
956 		 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
957     } else {
958 	/* The name wasn't cached, so ask Venus. */
959 	error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
960 	    &vtype);
961 
962 	if (error) {
963 	    MARK_INT_FAIL(CODA_LOOKUP_STATS);
964 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
965 		__func__, coda_f2s(&dcp->c_fid), nm, error));)
966 	    *vpp = (vnode_t *)0;
967 	} else {
968 	    MARK_INT_SAT(CODA_LOOKUP_STATS);
969 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
970 		__func__, coda_f2s(&VFid), vtype, error)); )
971 
972 	    cp = make_coda_node(&VFid, dvp->v_mount, vtype);
973 	    *vpp = CTOV(cp);
974 	    /* vpp is now vrefed. */
975 
976 	    /*
977 	     * Unless this vnode is marked CODA_NOCACHE, enter it into
978 	     * the coda name cache to avoid a future venus round-trip.
979 	     * XXX Interaction with componentname NOCACHE is unclear.
980 	     */
981 	    if (!(vtype & CODA_NOCACHE))
982 		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
983 	}
984     }
985 
986  exit:
987     /*
988      * If we are creating, and this was the last name to be looked up,
989      * and the error was ENOENT, then make the leaf NULL and return
990      * success.
991      * XXX Check against new lookup rules.
992      */
993     if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
994 	&& (cnp->cn_flags & ISLASTCN)
995 	&& (error == ENOENT))
996     {
997 	error = EJUSTRETURN;
998 	*ap->a_vpp = NULL;
999     }
1000 
1001     /*
1002      * If the lookup succeeded, we must generally lock the returned
1003      * vnode.  This could be a ., .., or normal lookup.  See
1004      * vnodeops(9) for the details.
1005      */
1006     /*
1007      * XXX LK_RETRY is likely incorrect.  Handle vn_lock failure
1008      * somehow, and remove LK_RETRY.
1009      */
1010     if (!error || (error == EJUSTRETURN)) {
1011 	/* Lookup has a value and it isn't "."? */
1012 	if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1013 	    if (flags & ISDOTDOT)
1014 		/* ..: unlock parent */
1015 		VOP_UNLOCK(dvp);
1016 	    /* all but .: lock child */
1017 	    vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1018 	    if (flags & ISDOTDOT)
1019 		/* ..: relock parent */
1020 	        vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1021 	}
1022 	/* else .: leave dvp locked */
1023     } else {
1024 	/* The lookup failed, so return NULL.  Leave dvp locked. */
1025 	*ap->a_vpp = NULL;
1026     }
1027     return(error);
1028 }
1029 
1030 /*ARGSUSED*/
1031 int
1032 coda_create(void *v)
1033 {
1034 /* true args */
1035     struct vop_create_args *ap = v;
1036     vnode_t *dvp = ap->a_dvp;
1037     struct cnode *dcp = VTOC(dvp);
1038     struct vattr *va = ap->a_vap;
1039     int exclusive = 1;
1040     int mode = ap->a_vap->va_mode;
1041     vnode_t **vpp = ap->a_vpp;
1042     struct componentname  *cnp = ap->a_cnp;
1043     kauth_cred_t cred = cnp->cn_cred;
1044     struct lwp *l = curlwp;
1045 /* locals */
1046     int error;
1047     struct cnode *cp;
1048     const char *nm = cnp->cn_nameptr;
1049     int len = cnp->cn_namelen;
1050     CodaFid VFid;
1051     struct vattr attr;
1052 
1053     MARK_ENTRY(CODA_CREATE_STATS);
1054 
1055     /* All creates are exclusive XXX */
1056     /* I'm assuming the 'mode' argument is the file mode bits XXX */
1057 
1058     /* Check for create of control object. */
1059     if (IS_CTL_NAME(dvp, nm, len)) {
1060 	*vpp = (vnode_t *)0;
1061 	MARK_INT_FAIL(CODA_CREATE_STATS);
1062 	return(EACCES);
1063     }
1064 
1065     error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1066 
1067     if (!error) {
1068 
1069         /*
1070 	 * XXX Violation of venus/kernel invariants is a difficult case,
1071 	 * but venus should not be able to cause a panic.
1072 	 */
1073 	/* If this is an exclusive create, panic if the file already exists. */
1074 	/* Venus should have detected the file and reported EEXIST. */
1075 
1076 	if ((exclusive == 1) &&
1077 	    (coda_find(&VFid) != NULL))
1078 	    panic("cnode existed for newly created file!");
1079 
1080 	cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1081 	*vpp = CTOV(cp);
1082 
1083 	/* XXX vnodeops doesn't say this argument can be changed. */
1084 	/* Update va to reflect the new attributes. */
1085 	(*va) = attr;
1086 
1087 	/* Update the attribute cache and mark it as valid */
1088 	if (coda_attr_cache) {
1089 	    VTOC(*vpp)->c_vattr = attr;
1090 	    VTOC(*vpp)->c_flags |= C_VATTR;
1091 	}
1092 
1093 	/* Invalidate parent's attr cache (modification time has changed). */
1094 	VTOC(dvp)->c_flags &= ~C_VATTR;
1095 
1096 	/* enter the new vnode in the Name Cache */
1097 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1098 
1099 	CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1100 	    coda_f2s(&VFid), error)); )
1101     } else {
1102 	*vpp = (vnode_t *)0;
1103 	CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1104 	    error));)
1105     }
1106 
1107     /*
1108      * vnodeops(9) says that we must unlock the parent and lock the child.
1109      * XXX Should we lock the child first?
1110      */
1111     vput(dvp);
1112     if (!error) {
1113 #ifdef CODA_VERBOSE
1114 	if ((cnp->cn_flags & LOCKLEAF) == 0)
1115 	    /* This should not happen; flags are for lookup only. */
1116 	    printf("%s: LOCKLEAF not set!\n", __func__);
1117 #endif
1118 
1119 	if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE)))
1120 	    /* XXX Perhaps avoid this panic. */
1121 	    panic("%s: couldn't lock child", __func__);
1122     }
1123 
1124     return(error);
1125 }
1126 
1127 int
1128 coda_remove(void *v)
1129 {
1130 /* true args */
1131     struct vop_remove_args *ap = v;
1132     vnode_t *dvp = ap->a_dvp;
1133     struct cnode *cp = VTOC(dvp);
1134     vnode_t *vp = ap->a_vp;
1135     struct componentname  *cnp = ap->a_cnp;
1136     kauth_cred_t cred = cnp->cn_cred;
1137     struct lwp *l = curlwp;
1138 /* locals */
1139     int error;
1140     const char *nm = cnp->cn_nameptr;
1141     int len = cnp->cn_namelen;
1142     struct cnode *tp;
1143 
1144     MARK_ENTRY(CODA_REMOVE_STATS);
1145 
1146     CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1147 	nm, coda_f2s(&cp->c_fid)));)
1148 
1149     /* Remove the file's entry from the CODA Name Cache */
1150     /* We're being conservative here, it might be that this person
1151      * doesn't really have sufficient access to delete the file
1152      * but we feel zapping the entry won't really hurt anyone -- dcs
1153      */
1154     /* I'm gonna go out on a limb here. If a file and a hardlink to it
1155      * exist, and one is removed, the link count on the other will be
1156      * off by 1. We could either invalidate the attrs if cached, or
1157      * fix them. I'll try to fix them. DCS 11/8/94
1158      */
1159     tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1160     if (tp) {
1161 	if (VALID_VATTR(tp)) {	/* If attrs are cached */
1162 	    if (tp->c_vattr.va_nlink > 1) {	/* If it's a hard link */
1163 		tp->c_vattr.va_nlink--;
1164 	    }
1165 	}
1166 
1167 	coda_nc_zapfile(VTOC(dvp), nm, len);
1168 	/* No need to flush it if it doesn't exist! */
1169     }
1170     /* Invalidate the parent's attr cache, the modification time has changed */
1171     VTOC(dvp)->c_flags &= ~C_VATTR;
1172 
1173     /* Check for remove of control object. */
1174     if (IS_CTL_NAME(dvp, nm, len)) {
1175 	MARK_INT_FAIL(CODA_REMOVE_STATS);
1176 	return(ENOENT);
1177     }
1178 
1179     error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1180 
1181     CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1182 
1183     /*
1184      * Unlock parent and child (avoiding double if ".").
1185      */
1186     if (dvp == vp) {
1187 	vrele(vp);
1188     } else {
1189 	vput(vp);
1190     }
1191     vput(dvp);
1192 
1193     return(error);
1194 }
1195 
1196 /*
1197  * dvp is the directory where the link is to go, and is locked.
1198  * vp is the object to be linked to, and is unlocked.
1199  * At exit, we must unlock dvp, and vput dvp.
1200  */
1201 int
1202 coda_link(void *v)
1203 {
1204 /* true args */
1205     struct vop_link_args *ap = v;
1206     vnode_t *vp = ap->a_vp;
1207     struct cnode *cp = VTOC(vp);
1208     vnode_t *dvp = ap->a_dvp;
1209     struct cnode *dcp = VTOC(dvp);
1210     struct componentname *cnp = ap->a_cnp;
1211     kauth_cred_t cred = cnp->cn_cred;
1212     struct lwp *l = curlwp;
1213 /* locals */
1214     int error;
1215     const char *nm = cnp->cn_nameptr;
1216     int len = cnp->cn_namelen;
1217 
1218     MARK_ENTRY(CODA_LINK_STATS);
1219 
1220     if (codadebug & CODADBGMSK(CODA_LINK)) {
1221 	myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1222 	myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1223     }
1231 
1232     /* Check for link to/from control object. */
1233     if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1234 	MARK_INT_FAIL(CODA_LINK_STATS);
1235 	return(EACCES);
1236     }
1237 
1238     /* If linking . to a name, error out earlier. */
1239     if (vp == dvp) {
1240 #ifdef CODA_VERBOSE
1241         printf("%s: vp == dvp\n", __func__);
1242 #endif
1243 	error = EISDIR;
1244 	goto exit;
1245     }
1246 
1247     /* XXX Why does venus_link need the vnode to be locked?*/
1248     if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1249 #ifdef CODA_VERBOSE
1250 	printf("%s: couldn't lock vnode %p\n", __func__, vp);
1251 #endif
1252 	error = EFAULT;		/* XXX better value */
1253 	goto exit;
1254     }
1255     error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1256     VOP_UNLOCK(vp);
1257 
1258     /* Invalidate parent's attr cache (the modification time has changed). */
1259     VTOC(dvp)->c_flags &= ~C_VATTR;
1260     /* Invalidate child's attr cache (XXX why). */
1261     VTOC(vp)->c_flags &= ~C_VATTR;
1262 
1263     CODADEBUG(CODA_LINK,	myprintf(("in link result %d\n",error)); )
1264 
1265 exit:
1266     vput(dvp);
1267     return(error);
1268 }
1269 
1270 int
1271 coda_rename(void *v)
1272 {
1273 /* true args */
1274     struct vop_rename_args *ap = v;
1275     vnode_t *odvp = ap->a_fdvp;
1276     struct cnode *odcp = VTOC(odvp);
1277     struct componentname  *fcnp = ap->a_fcnp;
1278     vnode_t *ndvp = ap->a_tdvp;
1279     struct cnode *ndcp = VTOC(ndvp);
1280     struct componentname  *tcnp = ap->a_tcnp;
1281     kauth_cred_t cred = fcnp->cn_cred;
1282     struct lwp *l = curlwp;
1283 /* true args */
1284     int error;
1285     const char *fnm = fcnp->cn_nameptr;
1286     int flen = fcnp->cn_namelen;
1287     const char *tnm = tcnp->cn_nameptr;
1288     int tlen = tcnp->cn_namelen;
1289 
1290     MARK_ENTRY(CODA_RENAME_STATS);
1291 
1292     /* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
1293        This could be Bad. XXX */
1294 #ifdef OLD_DIAGNOSTIC
1295     if ((fcnp->cn_cred != tcnp->cn_cred)
1296 	|| (fcnp->cn_lwp != tcnp->cn_lwp))
1297     {
1298 	panic("%s: component names don't agree", __func__);
1299     }
1300 #endif
1301 
1302     /* Check for rename involving control object. */
1303     if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1304 	MARK_INT_FAIL(CODA_RENAME_STATS);
1305 	return(EACCES);
1306     }
1307 
1308     /* Problem with moving directories -- need to flush entry for .. */
1309     if (odvp != ndvp) {
1310 	struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1311 	if (ovcp) {
1312 	    vnode_t *ovp = CTOV(ovcp);
1313 	    if ((ovp) &&
1314 		(ovp->v_type == VDIR)) /* If it's a directory */
1315 		coda_nc_zapfile(VTOC(ovp),"..", 2);
1316 	}
1317     }
1318 
1319     /* Remove the entries for both source and target files */
1320     coda_nc_zapfile(VTOC(odvp), fnm, flen);
1321     coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1322 
1323     /* Invalidate the parent's attr cache, the modification time has changed */
1324     VTOC(odvp)->c_flags &= ~C_VATTR;
1325     VTOC(ndvp)->c_flags &= ~C_VATTR;
1326 
1327     if (flen+1 > CODA_MAXNAMLEN) {
1328 	MARK_INT_FAIL(CODA_RENAME_STATS);
1329 	error = EINVAL;
1330 	goto exit;
1331     }
1332 
1333     if (tlen+1 > CODA_MAXNAMLEN) {
1334 	MARK_INT_FAIL(CODA_RENAME_STATS);
1335 	error = EINVAL;
1336 	goto exit;
1337     }
1338 
1339     error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1340 
1341  exit:
1342     CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1343     /* XXX - do we need to call cache_purge on the moved vnode? */
1344     cache_purge(ap->a_fvp);
1345 
1346     /* It seems to be incumbent on us to drop locks on all four vnodes */
1347     /* From-vnodes are not locked, only ref'd.  To-vnodes are locked. */
1348 
1349     vrele(ap->a_fvp);
1350     vrele(odvp);
1351 
1352     if (ap->a_tvp) {
1353 	if (ap->a_tvp == ndvp) {
1354 	    vrele(ap->a_tvp);
1355 	} else {
1356 	    vput(ap->a_tvp);
1357 	}
1358     }
1359 
1360     vput(ndvp);
1361     return(error);
1362 }
1363 
1364 int
1365 coda_mkdir(void *v)
1366 {
1367 /* true args */
1368     struct vop_mkdir_args *ap = v;
1369     vnode_t *dvp = ap->a_dvp;
1370     struct cnode *dcp = VTOC(dvp);
1371     struct componentname  *cnp = ap->a_cnp;
1372     struct vattr *va = ap->a_vap;
1373     vnode_t **vpp = ap->a_vpp;
1374     kauth_cred_t cred = cnp->cn_cred;
1375     struct lwp *l = curlwp;
1376 /* locals */
1377     int error;
1378     const char *nm = cnp->cn_nameptr;
1379     int len = cnp->cn_namelen;
1380     struct cnode *cp;
1381     CodaFid VFid;
1382     struct vattr ova;
1383 
1384     MARK_ENTRY(CODA_MKDIR_STATS);
1385 
1386     /* Check for mkdir of control object. */
1387     if (IS_CTL_NAME(dvp, nm, len)) {
1388 	*vpp = (vnode_t *)0;
1389 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1390 	return(EACCES);
1391     }
1392 
1393     if (len+1 > CODA_MAXNAMLEN) {
1394 	*vpp = (vnode_t *)0;
1395 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1396 	return(EACCES);
1397     }
1398 
1399     error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1400 
1401     if (!error) {
1402 	if (coda_find(&VFid) != NULL)
1403 	    panic("cnode existed for newly created directory!");
1404 
1405 
1406 	cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1407 	*vpp = CTOV(cp);
1408 
1409 	/* enter the new vnode in the Name Cache */
1410 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1411 
1412 	/* as a side effect, enter "." and ".." for the directory */
1413 	coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1414 	coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1415 
1416 	if (coda_attr_cache) {
1417 	    VTOC(*vpp)->c_vattr = ova;		/* update the attr cache */
1418 	    VTOC(*vpp)->c_flags |= C_VATTR;	/* Valid attributes in cnode */
1419 	}
1420 
1421 	/* Invalidate the parent's attr cache, the modification time has changed */
1422 	VTOC(dvp)->c_flags &= ~C_VATTR;
1423 
1424 	CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1425 	    coda_f2s(&VFid), error)); )
1426     } else {
1427 	*vpp = (vnode_t *)0;
1428 	CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1429     }
1430 
1431     /*
1432      * Currently, all mkdirs explicitly vput their dvp's.
1433      * It also appears that we *must* lock the vpp, since
1434      * lockleaf isn't set, but someone down the road is going
1435      * to try to unlock the new directory.
1436      */
1437     vput(dvp);
1438     if (!error) {
1439 	if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1440 	    panic("%s: couldn't lock child", __func__);
1441 	}
1442     }
1443 
1444     return(error);
1445 }
1446 
1447 int
1448 coda_rmdir(void *v)
1449 {
1450 /* true args */
1451     struct vop_rmdir_args *ap = v;
1452     vnode_t *dvp = ap->a_dvp;
1453     struct cnode *dcp = VTOC(dvp);
1454     vnode_t *vp = ap->a_vp;
1455     struct componentname  *cnp = ap->a_cnp;
1456     kauth_cred_t cred = cnp->cn_cred;
1457     struct lwp *l = curlwp;
1458 /* true args */
1459     int error;
1460     const char *nm = cnp->cn_nameptr;
1461     int len = cnp->cn_namelen;
1462     struct cnode *cp;
1463 
1464     MARK_ENTRY(CODA_RMDIR_STATS);
1465 
1466     /* Check for rmdir of control object. */
1467     if (IS_CTL_NAME(dvp, nm, len)) {
1468 	MARK_INT_FAIL(CODA_RMDIR_STATS);
1469 	return(ENOENT);
1470     }
1471 
1472     /* Can't remove . in self. */
1473     if (dvp == vp) {
1474 #ifdef CODA_VERBOSE
1475 	printf("%s: dvp == vp\n", __func__);
1476 #endif
1477 	error = EINVAL;
1478 	goto exit;
1479     }
1480 
1481     /*
1482      * The caller may not have adequate permissions, and the venus
1483      * operation may fail, but it doesn't hurt from a correctness
1484      * viewpoint to invalidate cache entries.
1485      * XXX Why isn't this done after the venus_rmdir call?
1486      */
1487     /* Look up child in name cache (by name, from parent). */
1488     cp = coda_nc_lookup(dcp, nm, len, cred);
1489     /* If found, remove all children of the child (., ..). */
1490     if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1491 
1492     /* Remove child's own entry. */
1493     coda_nc_zapfile(dcp, nm, len);
1494 
1495     /* Invalidate parent's attr cache (the modification time has changed). */
1496     dcp->c_flags &= ~C_VATTR;
1497 
1498     error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1499 
1500     CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1501 
1502 exit:
1503     /* vput both vnodes */
1504     vput(dvp);
1505     if (dvp == vp) {
1506 	vrele(vp);
1507     } else {
1508 	vput(vp);
1509     }
1510 
1511     return(error);
1512 }
1513 
1514 int
1515 coda_symlink(void *v)
1516 {
1517 /* true args */
1518     struct vop_symlink_args *ap = v;
1519     vnode_t *dvp = ap->a_dvp;
1520     struct cnode *dcp = VTOC(dvp);
1521     /* a_vpp is used in place below */
1522     struct componentname *cnp = ap->a_cnp;
1523     struct vattr *tva = ap->a_vap;
1524     char *path = ap->a_target;
1525     kauth_cred_t cred = cnp->cn_cred;
1526     struct lwp *l = curlwp;
1527 /* locals */
1528     int error;
1529     u_long saved_cn_flags;
1530     const char *nm = cnp->cn_nameptr;
1531     int len = cnp->cn_namelen;
1532     int plen = strlen(path);
1533 
1534     /*
1535      * Here's the strategy for the moment: perform the symlink, then
1536      * do a lookup to grab the resulting vnode.  I know this requires
1537  * two communications with Venus for a new symbolic link, but
1538      * that's the way the ball bounces.  I don't yet want to change
1539      * the way the Mach symlink works.  When Mach support is
1540      * deprecated, we should change symlink so that the common case
1541      * returns the resultant vnode in a vpp argument.
1542      */
1543 
1544     MARK_ENTRY(CODA_SYMLINK_STATS);
1545 
1546     /* Check for symlink of control object. */
1547     if (IS_CTL_NAME(dvp, nm, len)) {
1548 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1549 	error = EACCES;
1550 	goto exit;
1551     }
1552 
1553     if (plen+1 > CODA_MAXPATHLEN) {
1554 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1555 	error = EINVAL;
1556 	goto exit;
1557     }
1558 
1559     if (len+1 > CODA_MAXNAMLEN) {
1560 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1561 	error = EINVAL;
1562 	goto exit;
1563     }
1564 
1565     error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1566 
1567     /* Invalidate the parent's attr cache (modification time has changed). */
1568     dcp->c_flags &= ~C_VATTR;
1569 
1570     if (!error) {
1571 	/*
1572 	 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1573 	 * these are defined only for VOP_LOOKUP.   We desire to reuse
1574 	 * cnp for a VOP_LOOKUP operation, and must be sure not to pass
1575 	 * along stray flags handed to us.  Such stray flags can occur because
1576 	 * sys_symlink makes a namei call and then reuses the
1577 	 * componentname structure.
1578 	 */
1579 	/*
1580 	 * XXX Arguably we should create our own componentname structure
1581 	 * and not reuse the one that was passed in.
1582 	 */
1583 	saved_cn_flags = cnp->cn_flags;
1584 	cnp->cn_flags &= ~(MODMASK | OPMASK);
1585 	cnp->cn_flags |= LOOKUP;
1586 	error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1587 	cnp->cn_flags = saved_cn_flags;
1588 	/* Either an error occurs, or ap->a_vpp is locked. */
1589     }
1590 
1591  exit:
1592     /* unlock and dereference parent */
1593     vput(dvp);
1594 
1595     CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1596     return(error);
1597 }
1598 
1599 /*
1600  * Read directory entries.
1601  */
1602 int
1603 coda_readdir(void *v)
1604 {
1605 /* true args */
1606     struct vop_readdir_args *ap = v;
1607     vnode_t *vp = ap->a_vp;
1608     struct cnode *cp = VTOC(vp);
1609     struct uio *uiop = ap->a_uio;
1610     kauth_cred_t cred = ap->a_cred;
1611     int *eofflag = ap->a_eofflag;
1612     off_t **cookies = ap->a_cookies;
1613     int *ncookies = ap->a_ncookies;
1614 /* upcall decl */
1615 /* locals */
1616     int error = 0;
1617 
1618     MARK_ENTRY(CODA_READDIR_STATS);
1619 
1620     CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1621 	uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1622 	(long long) uiop->uio_offset)); )
1623 
1624     /* Check for readdir of control object. */
1625     if (IS_CTL_VP(vp)) {
1626 	MARK_INT_FAIL(CODA_READDIR_STATS);
1627 	return(ENOENT);
1628     }
1629 
1630     {
1631 	/* Redirect the request to UFS. */
1632 
1633 	/* If directory is not already open do an "internal open" on it. */
1634 	int opened_internally = 0;
1635 	if (cp->c_ovp == NULL) {
1636 	    opened_internally = 1;
1637 	    MARK_INT_GEN(CODA_OPEN_STATS);
1638 	    error = VOP_OPEN(vp, FREAD, cred);
1639 #ifdef	CODA_VERBOSE
1640 	    printf("%s: Internally Opening %p\n", __func__, vp);
1641 #endif
1642 	    if (error) return(error);
1643 	} else
1644 	    vp = cp->c_ovp;
1645 
1646 	/* Have UFS handle the call. */
1647 	CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1648 	    __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1649 	error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1650 	if (error)
1651 	    MARK_INT_FAIL(CODA_READDIR_STATS);
1652 	else
1653 	    MARK_INT_SAT(CODA_READDIR_STATS);
1654 
1655 	/* Do an "internal close" if necessary. */
1656 	if (opened_internally) {
1657 	    MARK_INT_GEN(CODA_CLOSE_STATS);
1658 	    (void)VOP_CLOSE(vp, FREAD, cred);
1659 	}
1660     }
1661 
1662     return(error);
1663 }
1664 
1665 /*
1666  * Convert from file system blocks to device blocks
1667  */
1668 int
1669 coda_bmap(void *v)
1670 {
1671     /* XXX on the global proc */
1672 /* true args */
1673     struct vop_bmap_args *ap = v;
1674     vnode_t *vp __unused = ap->a_vp;	/* file's vnode */
1675     daddr_t bn __unused = ap->a_bn;	/* fs block number */
1676     vnode_t **vpp = ap->a_vpp;			/* RETURN vp of device */
1677     daddr_t *bnp __unused = ap->a_bnp;	/* RETURN device block number */
1678     struct lwp *l __unused = curlwp;
1679 /* upcall decl */
1680 /* locals */
1681 
1682 	*vpp = (vnode_t *)0;
1683 	myprintf(("coda_bmap called!\n"));
1684 	return(EINVAL);
1685 }
1686 
1687 /*
1688  * I don't think the following two things are used anywhere, so I've
1689  * commented them out
1690  *
1691  * struct buf *async_bufhead;
1692  * int async_daemon_count;
1693  */
1694 int
1695 coda_strategy(void *v)
1696 {
1697 /* true args */
1698     struct vop_strategy_args *ap = v;
1699     struct buf *bp __unused = ap->a_bp;
1700     struct lwp *l __unused = curlwp;
1701 /* upcall decl */
1702 /* locals */
1703 
1704 	myprintf(("coda_strategy called!  "));
1705 	return(EINVAL);
1706 }
1707 
1708 int
1709 coda_reclaim(void *v)
1710 {
1711 /* true args */
1712     struct vop_reclaim_args *ap = v;
1713     vnode_t *vp = ap->a_vp;
1714     struct cnode *cp = VTOC(vp);
1715 /* upcall decl */
1716 /* locals */
1717 
1718 /*
1719  * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1720  */
1721     ENTRY;
1722 
1723     if (IS_UNMOUNTING(cp)) {
1724 #ifdef	DEBUG
1725 	if (VTOC(vp)->c_ovp) {
1726 	    if (IS_UNMOUNTING(cp))
1727 		printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1728 	}
1729 #endif
1730     } else {
1731 #ifdef OLD_DIAGNOSTIC
1732 	if (vp->v_usecount != 0)
1733 	    printf("%s: pushing active %p\n", __func__, vp);
1734 	if (VTOC(vp)->c_ovp) {
1735 	    panic("%s: c_ovp not void", __func__);
1736 	}
1737 #endif
1738     }
1739     coda_free(VTOC(vp));
1740     SET_VTOC(vp) = NULL;
1741     return (0);
1742 }
1743 
1744 int
1745 coda_lock(void *v)
1746 {
1747 /* true args */
1748     struct vop_lock_args *ap = v;
1749     vnode_t *vp = ap->a_vp;
1750     struct cnode *cp = VTOC(vp);
1751 /* upcall decl */
1752 /* locals */
1753 
1754     ENTRY;
1755 
1756     if (coda_lockdebug) {
1757 	myprintf(("Attempting lock on %s\n",
1758 		  coda_f2s(&cp->c_fid)));
1759     }
1760 
1761     return genfs_lock(v);
1762 }
1763 
1764 int
1765 coda_unlock(void *v)
1766 {
1767 /* true args */
1768     struct vop_unlock_args *ap = v;
1769     vnode_t *vp = ap->a_vp;
1770     struct cnode *cp = VTOC(vp);
1771 /* upcall decl */
1772 /* locals */
1773 
1774     ENTRY;
1775     if (coda_lockdebug) {
1776 	myprintf(("Attempting unlock on %s\n",
1777 		  coda_f2s(&cp->c_fid)));
1778     }
1779 
1780     return genfs_unlock(v);
1781 }
1782 
1783 int
1784 coda_islocked(void *v)
1785 {
1786 /* true args */
1787     ENTRY;
1788 
1789     return genfs_islocked(v);
1790 }
1791 
1792 /*
1793  * Given a device and inode, obtain a locked vnode.  One reference is
1794  * obtained and passed back to the caller.
1795  */
1796 int
1797 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1798 {
1799     int           error;
1800     struct mount *mp;
1801 
1802     /* Obtain mount point structure from device. */
1803     if (!(mp = devtomp(dev))) {
1804 	myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1805 	    (unsigned long long)dev));
1806 	return(ENXIO);
1807     }
1808 
1809     /*
1810      * Obtain vnode from mount point and inode.
1811      * XXX VFS_VGET does not clearly define locked/referenced state of
1812      * returned vnode.
1813      */
1814     error = VFS_VGET(mp, ino, vpp);
1815     if (error) {
1816 	myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1817 	    (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1818 	return(ENOENT);
1819     }
1820     /* share the underlying vnode lock with the coda vnode */
1821     mutex_obj_hold((*vpp)->v_interlock);
1822     uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1823     if (!VOP_ISLOCKED(*vpp))
1824 	VOP_LOCK(*vpp, LK_EXCLUSIVE);
1825     return(0);
1826 }
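/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * caller holding a (dev, ino) pair that names a container file could
 * obtain and later release the vnode roughly as follows; cvp, dev and
 * ino are assumed names.
 */
#if 0
	vnode_t *cvp;

	if (coda_grab_vnode(vp, dev, ino, &cvp) == 0) {
		/* cvp comes back locked and referenced. */
		/* ... use cvp ... */
		vput(cvp);	/* drop both the lock and the reference */
	}
#endif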
1827 
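/*
 * Debug helper: dump the interesting fields of a struct vattr through
 * myprintf().  Most values are cast to int, so very large sizes or
 * timestamps may print truncated.
 */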
1828 static void
1829 coda_print_vattr(struct vattr *attr)
1830 {
1831     const char *typestr;
1832 
1833     switch (attr->va_type) {
1834     case VNON:
1835 	typestr = "VNON";
1836 	break;
1837     case VREG:
1838 	typestr = "VREG";
1839 	break;
1840     case VDIR:
1841 	typestr = "VDIR";
1842 	break;
1843     case VBLK:
1844 	typestr = "VBLK";
1845 	break;
1846     case VCHR:
1847 	typestr = "VCHR";
1848 	break;
1849     case VLNK:
1850 	typestr = "VLNK";
1851 	break;
1852     case VSOCK:
1853 	typestr = "VSCK";
1854 	break;
1855     case VFIFO:
1856 	typestr = "VFFO";
1857 	break;
1858     case VBAD:
1859 	typestr = "VBAD";
1860 	break;
1861     default:
1862 	typestr = "????";
1863 	break;
1864     }
1865 
1866 
1867     myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1868 	      typestr, (int)attr->va_mode, (int)attr->va_uid,
1869 	      (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1870 
1871     myprintf(("      fileid %d nlink %d size %d blocksize %d bytes %d\n",
1872 	      (int)attr->va_fileid, (int)attr->va_nlink,
1873 	      (int)attr->va_size,
1874 	      (int)attr->va_blocksize,(int)attr->va_bytes));
1875     myprintf(("      gen %ld flags %ld vaflags %d\n",
1876 	      attr->va_gen, attr->va_flags, attr->va_vaflags));
1877     myprintf(("      atime sec %d nsec %d\n",
1878 	      (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1879     myprintf(("      mtime sec %d nsec %d\n",
1880 	      (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1881     myprintf(("      ctime sec %d nsec %d\n",
1882 	      (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1883 }
1884 
1885 /*
1886  * Return a vnode for the given fid.
1887  * If no cnode exists for this fid create one and put it
1888  * in a table hashed by coda_f2i().  If the cnode for
1889  * this fid is already in the table, return it (its reference count is
1890  * incremented by coda_find).  The cnode will be flushed from the
1891  * table when coda_inactive calls coda_unsave.
1892  */
1893 struct cnode *
1894 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1895 {
1896     struct cnode *cp;
1897     int          error;
1898 
1899     if ((cp = coda_find(fid)) == NULL) {
1900 	vnode_t *vp;
1901 
1902 	cp = coda_alloc();
1903 	cp->c_fid = *fid;
1904 
1905 	error = getnewvnode(VT_CODA, fvsp, coda_vnodeop_p, NULL, &vp);
1906 	if (error) {
1907 	    panic("%s: getnewvnode returned error %d", __func__, error);
1908 	}
1909 	vp->v_data = cp;
1910 	vp->v_type = type;
1911 	cp->c_vnode = vp;
1912 	uvm_vnp_setsize(vp, 0);
1913 	coda_save(cp);
1914 
1915     } else {
1916 	vref(CTOV(cp));
1917     }
1918 
1919     return cp;
1920 }
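/*
 * Illustrative sketch only: a hypothetical caller that has decoded a
 * CodaFid (for example from a Venus reply) would obtain the matching
 * vnode along these lines; fid, mp and vtype are assumed variables.
 */
#if 0
	struct cnode *ncp = make_coda_node(&fid, mp, vtype);
	vnode_t *nvp = CTOV(ncp);	/* referenced, but not locked */
#endif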
1921 
1922 /*
1923  * coda_getpages may be called on a vnode which has not been opened,
1924  * e.g. to fault in pages to execute a program.  In that case, we must
1925  * open the file to get the container.  The vnode may or may not be
1926  * locked, and we must leave it in the same state.
1927  */
1928 int
1929 coda_getpages(void *v)
1930 {
1931 	struct vop_getpages_args /* {
1932 		vnode_t *a_vp;
1933 		voff_t a_offset;
1934 		struct vm_page **a_m;
1935 		int *a_count;
1936 		int a_centeridx;
1937 		vm_prot_t a_access_type;
1938 		int a_advice;
1939 		int a_flags;
1940 	} */ *ap = v;
1941 	vnode_t *vp = ap->a_vp, *cvp;
1942 	struct cnode *cp = VTOC(vp);
1943 	struct lwp *l = curlwp;
1944 	kauth_cred_t cred = l->l_cred;
1945 	int error, cerror;
1946 	int waslocked;	       /* 1 if vnode lock was held on entry */
1947 	int didopen = 0;	/* 1 if we opened container file */
1948 
1949 	/*
1950 	 * Handle a case that uvm_fault doesn't quite use yet.
1951 	 * See layer_vnops.c for inspiration.
1952 	 */
1953 	if (ap->a_flags & PGO_LOCKED) {
1954 		return EBUSY;
1955 	}
1956 
1957 	KASSERT(mutex_owned(vp->v_interlock));
1958 
1959 	/* Check for control object. */
1960 	if (IS_CTL_VP(vp)) {
1961 #ifdef CODA_VERBOSE
1962 		printf("%s: control object %p\n", __func__, vp);
1963 #endif
1964 		return(EINVAL);
1965 	}
1966 
1967 	/*
1968 	 * XXX It's really not ok to be releasing the lock we get,
1969 	 * because we could be overlapping with another call to
1970 	 * getpages and drop a lock they are relying on.  We need to
1971 	 * figure out whether getpages ever is called holding the
1972 	 * lock, and if we should serialize getpages calls by some
1973 	 * mechanism.
1974 	 */
1975 	/* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1976 	waslocked = VOP_ISLOCKED(vp);
1977 
1978 	/* Get container file if not already present. */
1979 	cvp = cp->c_ovp;
1980 	if (cvp == NULL) {
1981 		/*
1982 		 * VOP_OPEN requires a locked vnode.  We must avoid
1983 		 * locking the vnode if it is already locked, and
1984 		 * leave it in the same state on exit.
1985 		 */
1986 		if (waslocked == 0) {
1987 			mutex_exit(vp->v_interlock);
1988 			cerror = vn_lock(vp, LK_EXCLUSIVE);
1989 			if (cerror) {
1990 #ifdef CODA_VERBOSE
1991 				printf("%s: can't lock vnode %p\n",
1992 				    __func__, vp);
1993 #endif
1994 				return cerror;
1995 			}
1996 #ifdef CODA_VERBOSE
1997 			printf("%s: locked vnode %p\n", __func__, vp);
1998 #endif
1999 		}
2000 
2001 		/*
2002 		 * Open file (causes upcall to venus).
2003 		 * XXX Perhaps we should not fully open the file, but
2004 		 * simply obtain a container file.
2005 		 */
2006 		/* XXX Is it ok to do this while holding the simplelock? */
2007 		cerror = VOP_OPEN(vp, FREAD, cred);
2008 
2009 		if (cerror) {
2010 #ifdef CODA_VERBOSE
2011 			printf("%s: cannot open vnode %p => %d\n", __func__,
2012 			    vp, cerror);
2013 #endif
2014 			if (waslocked == 0)
2015 				VOP_UNLOCK(vp);
2016 			return cerror;
2017 		}
2018 
2019 #ifdef CODA_VERBOSE
2020 		printf("%s: opened vnode %p\n", __func__, vp);
2021 #endif
2022 		cvp = cp->c_ovp;
2023 		didopen = 1;
2024 		if (waslocked == 0)
2025 			mutex_enter(vp->v_interlock);
2026 	}
2027 	KASSERT(cvp != NULL);
2028 
2029 	/* Munge the arg structure to refer to the container vnode. */
2030 	KASSERT(cvp->v_interlock == vp->v_interlock);
2031 	ap->a_vp = cp->c_ovp;
2032 
2033 	/* Finally, call getpages on it. */
2034 	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2035 
2036 	/* If we opened the vnode, we must close it. */
2037 	if (didopen) {
2038 		/*
2039 		 * VOP_CLOSE requires a locked vnode, but we are still
2040 		 * holding the lock (or riding a caller's lock).
2041 		 */
2042 		cerror = VOP_CLOSE(vp, FREAD, cred);
2043 #ifdef CODA_VERBOSE
2044 		if (cerror != 0)
2045 			/* XXX How should we handle this? */
2046 			printf("%s: closed vnode %p -> %d\n", __func__,
2047 			    vp, cerror);
2048 #endif
2049 
2050 		/* If we obtained a lock, drop it. */
2051 		if (waslocked == 0)
2052 			VOP_UNLOCK(vp);
2053 	}
2054 
2055 	return error;
2056 }
2057 
2058 /*
2059  * The protocol requires v_interlock to be held by the caller.
2060  */
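/*
 * The coda vnode shares v_interlock with its container vnode (see
 * coda_grab_vnode), so the interlock held by our caller also covers
 * the container, whose putpages will release it as the protocol
 * requires; the early-return paths below release it explicitly.
 */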
2061 int
2062 coda_putpages(void *v)
2063 {
2064 	struct vop_putpages_args /* {
2065 		vnode_t *a_vp;
2066 		voff_t a_offlo;
2067 		voff_t a_offhi;
2068 		int a_flags;
2069 	} */ *ap = v;
2070 	vnode_t *vp = ap->a_vp, *cvp;
2071 	struct cnode *cp = VTOC(vp);
2072 	int error;
2073 
2074 	KASSERT(mutex_owned(vp->v_interlock));
2075 
2076 	/* Check for control object. */
2077 	if (IS_CTL_VP(vp)) {
2078 		mutex_exit(vp->v_interlock);
2079 #ifdef CODA_VERBOSE
2080 		printf("%s: control object %p\n", __func__, vp);
2081 #endif
2082 		return(EINVAL);
2083 	}
2084 
2085 	/*
2086 	 * If container object is not present, then there are no pages
2087 	 * to put; just return without error.  This happens all the
2088 	 * time, apparently during discard of a closed vnode (which
2089 	 * trivially can't have dirty pages).
2090 	 */
2091 	cvp = cp->c_ovp;
2092 	if (cvp == NULL) {
2093 		mutex_exit(vp->v_interlock);
2094 		return 0;
2095 	}
2096 
2097 	/* Munge the arg structure to refer to the container vnode. */
2098 	KASSERT(cvp->v_interlock == vp->v_interlock);
2099 	ap->a_vp = cvp;
2100 
2101 	/* Finally, call putpages on it. */
2102 	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2103 
2104 	return error;
2105 }
2106