xref: /netbsd-src/sys/coda/coda_vnops.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /*	$NetBSD: coda_vnops.c,v 1.96 2014/03/20 06:48:54 skrll Exp $	*/
2 
3 /*
4  *
5  *             Coda: an Experimental Distributed File System
6  *                              Release 3.1
7  *
8  *           Copyright (c) 1987-1998 Carnegie Mellon University
9  *                          All Rights Reserved
10  *
11  * Permission  to  use, copy, modify and distribute this software and its
12  * documentation is hereby granted,  provided  that  both  the  copyright
13  * notice  and  this  permission  notice  appear  in  all  copies  of the
14  * software, derivative works or  modified  versions,  and  any  portions
15  * thereof, and that both notices appear in supporting documentation, and
16  * that credit is given to Carnegie Mellon University  in  all  documents
17  * and publicity pertaining to direct or indirect use of this code or its
18  * derivatives.
19  *
20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
25  * ANY DERIVATIVE WORK.
26  *
27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
29  * Mellon the rights to redistribute these changes without encumbrance.
30  *
31  * 	@(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32  */
33 
34 /*
35  * Mach Operating System
36  * Copyright (c) 1990 Carnegie-Mellon University
37  * Copyright (c) 1989 Carnegie-Mellon University
38  * All rights reserved.  The CMU software License Agreement specifies
39  * the terms and conditions for use and redistribution.
40  */
41 
42 /*
43  * This code was written for the Coda file system at Carnegie Mellon
44  * University.  Contributors include David Steere, James Kistler, and
45  * M. Satyanarayanan.
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.96 2014/03/20 06:48:54 skrll Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65 
66 #include <miscfs/genfs/genfs.h>
67 
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
76 
77 /*
78  * These flags select various performance enhancements.
79  */
80 int coda_attr_cache  = 1;       /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1;     /* Set to cache symbolic link information */
82 int coda_access_cache = 1;      /* Set to handle some access checks directly */
83 
84 /* structure to keep track of vfs calls */
85 
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
87 
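/*
 * Bump the per-operation counters kept in coda_vnodeopstats: total
 * calls (entries) and the internally satisfied, failed, and
 * generated cases (see struct coda_op_stats).
 */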
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
92 
93 /* What we are delaying for in printf */
94 static int coda_lockdebug = 0;
95 
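/* Trace entry into a vnode operation when coda_vnop_print_entry is set. */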
96 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
97 
98 /* Definition of the vnode operation vector */
99 
100 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
101     { &vop_default_desc, coda_vop_error },
102     { &vop_lookup_desc, coda_lookup },		/* lookup */
103     { &vop_create_desc, coda_create },		/* create */
104     { &vop_mknod_desc, coda_vop_error },	/* mknod */
105     { &vop_open_desc, coda_open },		/* open */
106     { &vop_close_desc, coda_close },		/* close */
107     { &vop_access_desc, coda_access },		/* access */
108     { &vop_getattr_desc, coda_getattr },	/* getattr */
109     { &vop_setattr_desc, coda_setattr },	/* setattr */
110     { &vop_read_desc, coda_read },		/* read */
111     { &vop_write_desc, coda_write },		/* write */
112     { &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
113     { &vop_ioctl_desc, coda_ioctl },		/* ioctl */
114     { &vop_mmap_desc, genfs_mmap },		/* mmap */
115     { &vop_fsync_desc, coda_fsync },		/* fsync */
116     { &vop_remove_desc, coda_remove },		/* remove */
117     { &vop_link_desc, coda_link },		/* link */
118     { &vop_rename_desc, coda_rename },		/* rename */
119     { &vop_mkdir_desc, coda_mkdir },		/* mkdir */
120     { &vop_rmdir_desc, coda_rmdir },		/* rmdir */
121     { &vop_symlink_desc, coda_symlink },	/* symlink */
122     { &vop_readdir_desc, coda_readdir },	/* readdir */
123     { &vop_readlink_desc, coda_readlink },	/* readlink */
124     { &vop_abortop_desc, coda_abortop },	/* abortop */
125     { &vop_inactive_desc, coda_inactive },	/* inactive */
126     { &vop_reclaim_desc, coda_reclaim },	/* reclaim */
127     { &vop_lock_desc, coda_lock },		/* lock */
128     { &vop_unlock_desc, coda_unlock },		/* unlock */
129     { &vop_bmap_desc, coda_bmap },		/* bmap */
130     { &vop_strategy_desc, coda_strategy },	/* strategy */
131     { &vop_print_desc, coda_vop_error },	/* print */
132     { &vop_islocked_desc, coda_islocked },	/* islocked */
133     { &vop_pathconf_desc, coda_vop_error },	/* pathconf */
134     { &vop_advlock_desc, coda_vop_nop },	/* advlock */
135     { &vop_bwrite_desc, coda_vop_error },	/* bwrite */
136     { &vop_seek_desc, genfs_seek },		/* seek */
137     { &vop_poll_desc, genfs_poll },		/* poll */
138     { &vop_getpages_desc, coda_getpages },	/* getpages */
139     { &vop_putpages_desc, coda_putpages },	/* putpages */
140     { NULL, NULL }
141 };
142 
143 static void coda_print_vattr(struct vattr *);
144 
145 int (**coda_vnodeop_p)(void *);
146 const struct vnodeopv_desc coda_vnodeop_opv_desc =
147         { &coda_vnodeop_p, coda_vnodeop_entries };
148 
149 /* Definitions of NetBSD vnodeop interfaces */
150 
151 /*
152  * A generic error routine.  Return EIO without looking at arguments.
153  */
154 int
155 coda_vop_error(void *anon) {
156     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157 
158     if (codadebug) {
159 	myprintf(("%s: Vnode operation %s called (error).\n",
160 	    __func__, (*desc)->vdesc_name));
161     }
162 
163     return EIO;
164 }
165 
166 /* A generic do-nothing. */
167 int
168 coda_vop_nop(void *anon) {
169     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170 
171     if (codadebug) {
172 	myprintf(("Vnode operation %s called, but unsupported\n",
173 		  (*desc)->vdesc_name));
174     }
175    return (0);
176 }
177 
178 int
179 coda_vnodeopstats_init(void)
180 {
181 	int i;
182 
183 	for (i = 0; i < CODA_VNODEOPS_SIZE; i++) {
184 		coda_vnodeopstats[i].opcode = i;
185 		coda_vnodeopstats[i].entries = 0;
186 		coda_vnodeopstats[i].sat_intrn = 0;
187 		coda_vnodeopstats[i].unsat_intrn = 0;
188 		coda_vnodeopstats[i].gen_intrn = 0;
189 	}
190 
191 	return 0;
192 }
193 
194 /*
195  * XXX The entire relationship between VOP_OPEN and having a container
196  * file (via venus_open) needs to be reexamined.  In particular, it's
197  * valid to open/mmap/close and then reference.  Instead of doing
198  * VOP_OPEN when getpages needs a container, we should do the
199  * venus_open part, and record that the vnode has opened the container
200  * for getpages, and do the matching logical close on coda_inactive.
201  * Further, coda_rdwr needs a container file, and sometimes needs to
202  * do the equivalent of open (core dumps).
203  */
204 /*
205  * coda_open calls Venus to return the device and inode of the
206  * container file, and then obtains a vnode for that file.  The
207  * container vnode is stored in the coda vnode, and a reference is
208  * added for each open file.
209  */
210 int
211 coda_open(void *v)
212 {
213     /*
214      * NetBSD can pass the O_EXCL flag in mode, even though the check
215      * has already happened.  Venus defensively assumes that if open
216      * is passed O_EXCL, it must be a bug.  We strip the flag here.
217      */
218 /* true args */
219     struct vop_open_args *ap = v;
220     vnode_t *vp = ap->a_vp;
221     struct cnode *cp = VTOC(vp);
222     int flag = ap->a_mode & (~O_EXCL);
223     kauth_cred_t cred = ap->a_cred;
224 /* locals */
225     int error;
226     dev_t dev;			/* container file device, inode, vnode */
227     ino_t inode;
228     vnode_t *container_vp;
229 
230     MARK_ENTRY(CODA_OPEN_STATS);
231 
232     KASSERT(VOP_ISLOCKED(vp));
233     /* Check for open of control file. */
234     if (IS_CTL_VP(vp)) {
235 	/* if (WRITABLE(flag)) */
236 	if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
237 	    MARK_INT_FAIL(CODA_OPEN_STATS);
238 	    return(EACCES);
239 	}
240 	MARK_INT_SAT(CODA_OPEN_STATS);
241 	return(0);
242     }
243 
244     error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
245     if (error)
246 	return (error);
247     if (!error) {
248 	    CODADEBUG(CODA_OPEN, myprintf((
249 		"%s: dev 0x%llx inode %llu result %d\n", __func__,
250 		(unsigned long long)dev, (unsigned long long)inode, error));)
251     }
252 
253     /*
254      * Obtain locked and referenced container vnode from container
255      * device/inode.
256      */
257     error = coda_grab_vnode(vp, dev, inode, &container_vp);
258     if (error)
259 	return (error);
260 
261     /* Save the vnode pointer for the container file. */
262     if (cp->c_ovp == NULL) {
263 	cp->c_ovp = container_vp;
264     } else {
265 	if (cp->c_ovp != container_vp)
266 	    /*
267 	     * Perhaps venus returned a different container, or
268 	     * something else went wrong.
269 	     */
270 	    panic("%s: cp->c_ovp != container_vp", __func__);
271     }
272     cp->c_ocount++;
273 
274     /* Flush the attribute cache if writing the file. */
275     if (flag & FWRITE) {
276 	cp->c_owrite++;
277 	cp->c_flags &= ~C_VATTR;
278     }
279 
280     /*
281      * Save the <device, inode> pair for the container file to speed
282      * up subsequent reads while closed (mmap, program execution).
283      * This is perhaps safe because venus will invalidate the node
284      * before changing the container file mapping.
285      */
286     cp->c_device = dev;
287     cp->c_inode = inode;
288 
289     /* Open the container file. */
290     error = VOP_OPEN(container_vp, flag, cred);
291     /*
292      * Drop the lock on the container, after we have done VOP_OPEN
293      * (which requires a locked vnode).
294      */
295     VOP_UNLOCK(container_vp);
296     return(error);
297 }
298 
299 /*
300  * Close the cache file used for I/O and notify Venus.
301  */
302 int
303 coda_close(void *v)
304 {
305 /* true args */
306     struct vop_close_args *ap = v;
307     vnode_t *vp = ap->a_vp;
308     struct cnode *cp = VTOC(vp);
309     int flag = ap->a_fflag;
310     kauth_cred_t cred = ap->a_cred;
311 /* locals */
312     int error;
313 
314     MARK_ENTRY(CODA_CLOSE_STATS);
315 
316     /* Check for close of control file. */
317     if (IS_CTL_VP(vp)) {
318 	MARK_INT_SAT(CODA_CLOSE_STATS);
319 	return(0);
320     }
321 
322     /*
323      * XXX The IS_UNMOUNTING part of this is very suspect.
324      */
325     if (IS_UNMOUNTING(cp)) {
326 	if (cp->c_ovp) {
327 #ifdef	CODA_VERBOSE
328 	    printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
329 		__func__, vp->v_usecount, cp->c_ovp, vp, cp);
330 #endif
331 #ifdef	hmm
332 	    vgone(cp->c_ovp);
333 #else
334 	    vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
335 	    VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
336 	    vput(cp->c_ovp);
337 #endif
338 	} else {
339 #ifdef	CODA_VERBOSE
340 	    printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
341 #endif
342 	}
343 	return ENODEV;
344     }
345 
346     /* Lock the container node, and VOP_CLOSE it. */
347     vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
348     VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
349     /*
350      * Drop the lock we just obtained, and vrele the container vnode.
351      * Decrement reference counts, and clear container vnode pointer on
352      * last close.
353      */
354     vput(cp->c_ovp);
355     if (flag & FWRITE)
356 	--cp->c_owrite;
357     if (--cp->c_ocount == 0)
358 	cp->c_ovp = NULL;
359 
360     error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
361 
362     CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
363     return(error);
364 }
365 
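/*
 * Read and write simply forward to coda_rdwr, which performs the I/O
 * on the container file.
 */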
366 int
367 coda_read(void *v)
368 {
369     struct vop_read_args *ap = v;
370 
371     ENTRY;
372     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
373 		    ap->a_ioflag, ap->a_cred, curlwp));
374 }
375 
376 int
377 coda_write(void *v)
378 {
379     struct vop_write_args *ap = v;
380 
381     ENTRY;
382     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
383 		    ap->a_ioflag, ap->a_cred, curlwp));
384 }
385 
386 int
387 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
388 	kauth_cred_t cred, struct lwp *l)
389 {
390 /* upcall decl */
391   /* NOTE: container file operation!!! */
392 /* locals */
393     struct cnode *cp = VTOC(vp);
394     vnode_t *cfvp = cp->c_ovp;
395     struct proc *p = l->l_proc;
396     int opened_internally = 0;
397     int error = 0;
398 
399     MARK_ENTRY(CODA_RDWR_STATS);
400 
401     CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
402 	uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
403 	(long long) uiop->uio_offset)); )
404 
405     /* Check for rdwr of control object. */
406     if (IS_CTL_VP(vp)) {
407 	MARK_INT_FAIL(CODA_RDWR_STATS);
408 	return(EINVAL);
409     }
410 
411     /* Redirect the request to UFS. */
412 
413     /*
414      * If file is not already open this must be a page
415      * {read,write} request.  Iget the cache file's inode
416      * pointer if we still have its <device, inode> pair.
417      * Otherwise, we must do an internal open to derive the
418      * pair.
419      * XXX Integrate this into a coherent strategy for container
420      * file acquisition.
421      */
422     if (cfvp == NULL) {
423 	/*
424 	 * If we're dumping core, do the internal open. Otherwise
425 	 * venus won't have the correct size of the core when
426 	 * it's completely written.
427 	 */
428 	if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
429 #ifdef CODA_VERBOSE
430 	    printf("%s: grabbing container vnode, losing reference\n",
431 		__func__);
432 #endif
433 	    /* Get locked and refed vnode. */
434 	    error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
435 	    if (error) {
436 		MARK_INT_FAIL(CODA_RDWR_STATS);
437 		return(error);
438 	    }
439 	    /*
440 	     * Drop lock.
441 	     * XXX Where is the reference released?
442 	     */
443 	    VOP_UNLOCK(cfvp);
444 	}
445 	else {
446 #ifdef CODA_VERBOSE
447 	    printf("%s: internal VOP_OPEN\n", __func__);
448 #endif
449 	    opened_internally = 1;
450 	    MARK_INT_GEN(CODA_OPEN_STATS);
451 	    error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
452 #ifdef	CODA_VERBOSE
453 	    printf("%s: Internally Opening %p\n", __func__, vp);
454 #endif
455 	    if (error) {
456 		MARK_INT_FAIL(CODA_RDWR_STATS);
457 		return(error);
458 	    }
459 	    cfvp = cp->c_ovp;
460 	}
461     }
462 
463     /* Have UFS handle the call. */
464     CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
465 	coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
466 
467     if (rw == UIO_READ) {
468 	error = VOP_READ(cfvp, uiop, ioflag, cred);
469     } else {
470 	error = VOP_WRITE(cfvp, uiop, ioflag, cred);
471     }
472 
473     if (error)
474 	MARK_INT_FAIL(CODA_RDWR_STATS);
475     else
476 	MARK_INT_SAT(CODA_RDWR_STATS);
477 
478     /* Do an internal close if necessary. */
479     if (opened_internally) {
480 	MARK_INT_GEN(CODA_CLOSE_STATS);
481 	(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
482     }
483 
484     /* Invalidate cached attributes if writing. */
485     if (rw == UIO_WRITE)
486 	cp->c_flags &= ~C_VATTR;
487     return(error);
488 }
489 
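/*
 * ioctl is supported only on the Coda control object (pioctl): the
 * pathname in the request is looked up and the command is passed to
 * venus for the resulting coda object.
 */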
490 int
491 coda_ioctl(void *v)
492 {
493 /* true args */
494     struct vop_ioctl_args *ap = v;
495     vnode_t *vp = ap->a_vp;
496     int com = ap->a_command;
497     void *data = ap->a_data;
498     int flag = ap->a_fflag;
499     kauth_cred_t cred = ap->a_cred;
500 /* locals */
501     int error;
502     vnode_t *tvp;
503     struct PioctlData *iap = (struct PioctlData *)data;
504     namei_simple_flags_t sflags;
505 
506     MARK_ENTRY(CODA_IOCTL_STATS);
507 
508     CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
509 
510     /* Don't check for operation on a dying object; for the ctlvp it
511        shouldn't matter */
512 
513     /* Must be control object to succeed. */
514     if (!IS_CTL_VP(vp)) {
515 	MARK_INT_FAIL(CODA_IOCTL_STATS);
516 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
517 	return (EOPNOTSUPP);
518     }
519     /* Look up the pathname. */
520 
521     /* Should we use the name cache here? It would get it from
522        lookupname sooner or later anyway, right? */
523 
524     sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
525     error = namei_simple_user(iap->path, sflags, &tvp);
526 
527     if (error) {
528 	MARK_INT_FAIL(CODA_IOCTL_STATS);
529 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
530 	    __func__, error));)
531 	return(error);
532     }
533 
534     /*
535      * Make sure this is a coda style cnode, but it may be a
536      * different vfsp
537      */
538     /* XXX: this totally violates the comment about vtagtype in vnode.h */
539     if (tvp->v_tag != VT_CODA) {
540 	vrele(tvp);
541 	MARK_INT_FAIL(CODA_IOCTL_STATS);
542 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
543 	    __func__, iap->path));)
544 	return(EINVAL);
545     }
546 
547     if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
548 	vrele(tvp);
549 	return(EINVAL);
550     }
551     error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
552 	cred, curlwp);
553 
554     if (error)
555 	MARK_INT_FAIL(CODA_IOCTL_STATS);
556     else
557 	CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
558 
559     vrele(tvp);
560     return(error);
561 }
562 
563 /*
564  * To reduce the cost of a user-level venus, we cache attributes in
565  * the kernel.  Each cnode has storage allocated for an attribute. If
566  * c_vattr is valid, return a reference to it. Otherwise, get the
567  * attributes from venus and store them in the cnode.  There is some
568  * question whether this method is a security leak. But I think that in
569  * order to make this call, the user must have done a lookup and
570  * opened the file, and therefore should already have access.
571  */
572 int
573 coda_getattr(void *v)
574 {
575 /* true args */
576     struct vop_getattr_args *ap = v;
577     vnode_t *vp = ap->a_vp;
578     struct cnode *cp = VTOC(vp);
579     struct vattr *vap = ap->a_vap;
580     kauth_cred_t cred = ap->a_cred;
581 /* locals */
582     int error;
583 
584     MARK_ENTRY(CODA_GETATTR_STATS);
585 
586     /* Check for getattr of control object. */
587     if (IS_CTL_VP(vp)) {
588 	MARK_INT_FAIL(CODA_GETATTR_STATS);
589 	return(ENOENT);
590     }
591 
592     /* Check to see if the attributes have already been cached */
593     if (VALID_VATTR(cp)) {
594 	CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
595 	    __func__, coda_f2s(&cp->c_fid)));})
596 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
597 	    coda_print_vattr(&cp->c_vattr); )
598 
599 	*vap = cp->c_vattr;
600 	MARK_INT_SAT(CODA_GETATTR_STATS);
601 	return(0);
602     }
603 
604     error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
605 
606     if (!error) {
607 	CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
608 	    __func__, coda_f2s(&cp->c_fid), error)); )
609 
610 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
611 	    coda_print_vattr(vap);	)
612 
613 	/* If not open for write, store attributes in cnode */
614 	if ((cp->c_owrite == 0) && (coda_attr_cache)) {
615 	    cp->c_vattr = *vap;
616 	    cp->c_flags |= C_VATTR;
617 	}
618 
619     }
620     return(error);
621 }
622 
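/*
 * Pass attribute changes to venus; on success, invalidate the cached
 * attributes so they are refetched on the next getattr.
 */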
623 int
624 coda_setattr(void *v)
625 {
626 /* true args */
627     struct vop_setattr_args *ap = v;
628     vnode_t *vp = ap->a_vp;
629     struct cnode *cp = VTOC(vp);
630     struct vattr *vap = ap->a_vap;
631     kauth_cred_t cred = ap->a_cred;
632 /* locals */
633     int error;
634 
635     MARK_ENTRY(CODA_SETATTR_STATS);
636 
637     /* Check for setattr of control object. */
638     if (IS_CTL_VP(vp)) {
639 	MARK_INT_FAIL(CODA_SETATTR_STATS);
640 	return(ENOENT);
641     }
642 
643     if (codadebug & CODADBGMSK(CODA_SETATTR)) {
644 	coda_print_vattr(vap);
645     }
646     error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
647 
648     if (!error)
649 	cp->c_flags &= ~C_VATTR;
650 
651     CODADEBUG(CODA_SETATTR,	myprintf(("setattr %d\n", error)); )
652     return(error);
653 }
654 
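/*
 * Check access.  Apart from the control object, the only case
 * answered in the kernel is exec (lookup) permission on a directory
 * found in the coda namecache (when coda_access_cache is set);
 * everything else is asked of venus.
 */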
655 int
656 coda_access(void *v)
657 {
658 /* true args */
659     struct vop_access_args *ap = v;
660     vnode_t *vp = ap->a_vp;
661     struct cnode *cp = VTOC(vp);
662     int mode = ap->a_mode;
663     kauth_cred_t cred = ap->a_cred;
664 /* locals */
665     int error;
666 
667     MARK_ENTRY(CODA_ACCESS_STATS);
668 
669     /* Check for access of control object.  Only read access is
670        allowed on it. */
671     if (IS_CTL_VP(vp)) {
672 	/* bogus hack - all will be marked as successes */
673 	MARK_INT_SAT(CODA_ACCESS_STATS);
674 	return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
675 	       ? 0 : EACCES);
676     }
677 
678     /*
679      * If the file is a directory, and we are checking exec (e.g. lookup)
680      * access, and the file is in the namecache, then the user must have
681      * lookup access to it.
682      */
683     if (coda_access_cache) {
684 	if ((vp->v_type == VDIR) && (mode & VEXEC)) {
685 	    if (coda_nc_lookup(cp, ".", 1, cred)) {
686 		MARK_INT_SAT(CODA_ACCESS_STATS);
687 		return(0);                     /* it was in the cache */
688 	    }
689 	}
690     }
691 
692     error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
693 
694     return(error);
695 }
696 
697 /*
698  * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
699  * done. If a buffer has been saved in anticipation of a coda_create or
700  * a coda_remove, delete it.
701  */
702 /* ARGSUSED */
703 int
704 coda_abortop(void *v)
705 {
706 /* true args */
707     struct vop_abortop_args /* {
708 	vnode_t *a_dvp;
709 	struct componentname *a_cnp;
710     } */ *ap = v;
711 
712     (void)ap;
713 /* upcall decl */
714 /* locals */
715 
716     return (0);
717 }
718 
719 int
720 coda_readlink(void *v)
721 {
722 /* true args */
723     struct vop_readlink_args *ap = v;
724     vnode_t *vp = ap->a_vp;
725     struct cnode *cp = VTOC(vp);
726     struct uio *uiop = ap->a_uio;
727     kauth_cred_t cred = ap->a_cred;
728 /* locals */
729     struct lwp *l = curlwp;
730     int error;
731     char *str;
732     int len;
733 
734     MARK_ENTRY(CODA_READLINK_STATS);
735 
736     /* Check for readlink of control object. */
737     if (IS_CTL_VP(vp)) {
738 	MARK_INT_FAIL(CODA_READLINK_STATS);
739 	return(ENOENT);
740     }
741 
742     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
743 	uiop->uio_rw = UIO_READ;
744 	error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
745 	if (error)
746 	    MARK_INT_FAIL(CODA_READLINK_STATS);
747 	else
748 	    MARK_INT_SAT(CODA_READLINK_STATS);
749 	return(error);
750     }
751 
752     error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
753 
754     if (!error) {
755 	uiop->uio_rw = UIO_READ;
756 	error = uiomove(str, len, uiop);
757 
758 	if (coda_symlink_cache) {
759 	    cp->c_symlink = str;
760 	    cp->c_symlen = len;
761 	    cp->c_flags |= C_SYMLINK;
762 	} else
763 	    CODA_FREE(str, len);
764     }
765 
766     CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
767     return(error);
768 }
769 
770 int
771 coda_fsync(void *v)
772 {
773 /* true args */
774     struct vop_fsync_args *ap = v;
775     vnode_t *vp = ap->a_vp;
776     struct cnode *cp = VTOC(vp);
777     kauth_cred_t cred = ap->a_cred;
778 /* locals */
779     vnode_t *convp = cp->c_ovp;
780     int error;
781 
782     MARK_ENTRY(CODA_FSYNC_STATS);
783 
784     /* Check for fsync on an unmounting object */
785     /* The NetBSD kernel, in its infinite wisdom, can try to fsync
786      * after an unmount has been initiated.  This is a Bad Thing,
787      * which we have to avoid.  Not a legitimate failure for stats.
788      */
789     if (IS_UNMOUNTING(cp)) {
790 	return(ENODEV);
791     }
792 
793     /* Check for fsync of control object. */
794     if (IS_CTL_VP(vp)) {
795 	MARK_INT_SAT(CODA_FSYNC_STATS);
796 	return(0);
797     }
798 
799     if (convp)
800     	VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
801 
802     /*
803      * We can expect fsync on any vnode at all if venus is purging it.
804      * Venus can't very well answer the fsync request, now can it?
805      * Hopefully, it won't have to, because hopefully, venus preserves
806      * the (possibly untrue) invariant that it never purges an open
807      * vnode.  Hopefully.
808      */
809     if (cp->c_flags & C_PURGING) {
810 	return(0);
811     }
812 
813     error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
814 
815     CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
816     return(error);
817 }
818 
819 /*
820  * vp is locked on entry, and we must unlock it.
821  * XXX This routine is suspect and probably needs rewriting.
822  */
823 int
824 coda_inactive(void *v)
825 {
826 /* true args */
827     struct vop_inactive_args *ap = v;
828     vnode_t *vp = ap->a_vp;
829     struct cnode *cp = VTOC(vp);
830     kauth_cred_t cred __unused = NULL;
831 
832     /* We don't need to send inactive to venus - DCS */
833     MARK_ENTRY(CODA_INACTIVE_STATS);
834 
835     if (IS_CTL_VP(vp)) {
836 	MARK_INT_SAT(CODA_INACTIVE_STATS);
837 	return 0;
838     }
839 
840     CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
841 				  coda_f2s(&cp->c_fid), vp->v_mount));)
842 
843     /* If an array has been allocated to hold the symlink, deallocate it */
844     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
845 	if (cp->c_symlink == NULL)
846 	    panic("%s: null symlink pointer in cnode", __func__);
847 
848 	CODA_FREE(cp->c_symlink, cp->c_symlen);
849 	cp->c_flags &= ~C_SYMLINK;
850 	cp->c_symlen = 0;
851     }
852 
853     /* Remove it from the table so it can't be found. */
854     coda_unsave(cp);
855     if (vp->v_mount->mnt_data == NULL) {
856 	myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
857 	panic("badness in coda_inactive");
858     }
859 
860 #ifdef CODA_VERBOSE
861     /* Sanity checks that perhaps should be panic. */
862     if (vp->v_usecount > 1)
863 	printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
864     if (cp->c_ovp != NULL)
865 	printf("%s: %p ovp != NULL\n", __func__, vp);
866 #endif
867     /* XXX Do we need to VOP_CLOSE container vnodes? */
868     VOP_UNLOCK(vp);
869     if (!IS_UNMOUNTING(cp))
870 	*ap->a_recycle = true;
871 
872     MARK_INT_SAT(CODA_INACTIVE_STATS);
873     return(0);
874 }
875 
876 /*
877  * Coda does not use the normal namecache, but a private version.
878  * Consider how to use the standard facility instead.
879  */
880 int
881 coda_lookup(void *v)
882 {
883 /* true args */
884     struct vop_lookup_v2_args *ap = v;
885     /* (locked) vnode of dir in which to do lookup */
886     vnode_t *dvp = ap->a_dvp;
887     struct cnode *dcp = VTOC(dvp);
888     /* output variable for result */
889     vnode_t **vpp = ap->a_vpp;
890     /* name to lookup */
891     struct componentname *cnp = ap->a_cnp;
892     kauth_cred_t cred = cnp->cn_cred;
893     struct lwp *l = curlwp;
894 /* locals */
895     struct cnode *cp;
896     const char *nm = cnp->cn_nameptr;
897     int len = cnp->cn_namelen;
898     CodaFid VFid;
899     int	vtype;
900     int error = 0;
901 
902     MARK_ENTRY(CODA_LOOKUP_STATS);
903 
904     CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
905 	nm, coda_f2s(&dcp->c_fid)));)
906 
907     /*
908      * XXX componentname flags in MODMASK are not handled at all
909      */
910 
911     /*
912      * The overall strategy is to switch on the lookup type and get a
913      * result vnode that is vref'd but not locked.
914      */
915 
916     /* Check for lookup of control object. */
917     if (IS_CTL_NAME(dvp, nm, len)) {
918 	*vpp = coda_ctlvp;
919 	vref(*vpp);
920 	MARK_INT_SAT(CODA_LOOKUP_STATS);
921 	goto exit;
922     }
923 
924     /* Avoid trying to hand venus an unreasonably long name. */
925     if (len+1 > CODA_MAXNAMLEN) {
926 	MARK_INT_FAIL(CODA_LOOKUP_STATS);
927 	CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
928 	    __func__, coda_f2s(&dcp->c_fid), nm));)
929 	*vpp = (vnode_t *)0;
930 	error = EINVAL;
931 	goto exit;
932     }
933 
934     /*
935      * Try to resolve the lookup in the minicache.  If that fails, ask
936      * venus to do the lookup.  XXX The interaction between vnode
937      * locking and any locking that coda does is not clear.
938      */
939     cp = coda_nc_lookup(dcp, nm, len, cred);
940     if (cp) {
941 	*vpp = CTOV(cp);
942 	vref(*vpp);
943 	CODADEBUG(CODA_LOOKUP,
944 		 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
945     } else {
946 	/* The name wasn't cached, so ask Venus. */
947 	error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
948 	    &vtype);
949 
950 	if (error) {
951 	    MARK_INT_FAIL(CODA_LOOKUP_STATS);
952 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s) %d\n",
953 		__func__, coda_f2s(&dcp->c_fid), nm, error));)
954 	    *vpp = (vnode_t *)0;
955 	} else {
956 	    MARK_INT_SAT(CODA_LOOKUP_STATS);
957 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
958 		__func__, coda_f2s(&VFid), vtype, error)); )
959 
960 	    cp = make_coda_node(&VFid, dvp->v_mount, vtype);
961 	    *vpp = CTOV(cp);
962 	    /* vpp is now vrefed. */
963 
964 	    /*
965 	     * Unless this vnode is marked CODA_NOCACHE, enter it into
966 	     * the coda name cache to avoid a future venus round-trip.
967 	     * XXX Interaction with componentname NOCACHE is unclear.
968 	     */
969 	    if (!(vtype & CODA_NOCACHE))
970 		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
971 	}
972     }
973 
974  exit:
975     /*
976      * If we are creating, and this was the last name to be looked up,
977      * and the error was ENOENT, then make the leaf NULL and return
978      * success.
979      * XXX Check against new lookup rules.
980      */
981     if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
982 	&& (cnp->cn_flags & ISLASTCN)
983 	&& (error == ENOENT))
984     {
985 	error = EJUSTRETURN;
986 	*ap->a_vpp = NULL;
987     }
988 
989     return(error);
990 }
991 
992 /*ARGSUSED*/
993 int
994 coda_create(void *v)
995 {
996 /* true args */
997     struct vop_create_v3_args *ap = v;
998     vnode_t *dvp = ap->a_dvp;
999     struct cnode *dcp = VTOC(dvp);
1000     struct vattr *va = ap->a_vap;
1001     int exclusive = 1;
1002     int mode = ap->a_vap->va_mode;
1003     vnode_t **vpp = ap->a_vpp;
1004     struct componentname  *cnp = ap->a_cnp;
1005     kauth_cred_t cred = cnp->cn_cred;
1006     struct lwp *l = curlwp;
1007 /* locals */
1008     int error;
1009     struct cnode *cp;
1010     const char *nm = cnp->cn_nameptr;
1011     int len = cnp->cn_namelen;
1012     CodaFid VFid;
1013     struct vattr attr;
1014 
1015     MARK_ENTRY(CODA_CREATE_STATS);
1016 
1017     /* All creates are exclusive XXX */
1018     /* I'm assuming the 'mode' argument is the file mode bits XXX */
1019 
1020     /* Check for create of control object. */
1021     if (IS_CTL_NAME(dvp, nm, len)) {
1022 	*vpp = (vnode_t *)0;
1023 	MARK_INT_FAIL(CODA_CREATE_STATS);
1024 	return(EACCES);
1025     }
1026 
1027     error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1028 
1029     if (!error) {
1030 
1031         /*
1032 	 * XXX Violation of venus/kernel invariants is a difficult case,
1033 	 * but venus should not be able to cause a panic.
1034 	 */
1035 	/* If this is an exclusive create, panic if the file already exists. */
1036 	/* Venus should have detected the file and reported EEXIST. */
1037 
1038 	if ((exclusive == 1) &&
1039 	    (coda_find(&VFid) != NULL))
1040 	    panic("cnode existed for newly created file!");
1041 
1042 	cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1043 	*vpp = CTOV(cp);
1044 
1045 	/* XXX vnodeops doesn't say this argument can be changed. */
1046 	/* Update va to reflect the new attributes. */
1047 	(*va) = attr;
1048 
1049 	/* Update the attribute cache and mark it as valid */
1050 	if (coda_attr_cache) {
1051 	    VTOC(*vpp)->c_vattr = attr;
1052 	    VTOC(*vpp)->c_flags |= C_VATTR;
1053 	}
1054 
1055 	/* Invalidate parent's attr cache (modification time has changed). */
1056 	VTOC(dvp)->c_flags &= ~C_VATTR;
1057 
1058 	/* enter the new vnode in the Name Cache */
1059 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1060 
1061 	CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1062 	    coda_f2s(&VFid), error)); )
1063     } else {
1064 	*vpp = (vnode_t *)0;
1065 	CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1066 	    error));)
1067     }
1068 
1069     if (!error) {
1070 #ifdef CODA_VERBOSE
1071 	if ((cnp->cn_flags & LOCKLEAF) == 0)
1072 	    /* This should not happen; flags are for lookup only. */
1073 	    printf("%s: LOCKLEAF not set!\n", __func__);
1074 #endif
1075     }
1076 
1077     return(error);
1078 }
1079 
1080 int
1081 coda_remove(void *v)
1082 {
1083 /* true args */
1084     struct vop_remove_args *ap = v;
1085     vnode_t *dvp = ap->a_dvp;
1086     struct cnode *cp = VTOC(dvp);
1087     vnode_t *vp = ap->a_vp;
1088     struct componentname  *cnp = ap->a_cnp;
1089     kauth_cred_t cred = cnp->cn_cred;
1090     struct lwp *l = curlwp;
1091 /* locals */
1092     int error;
1093     const char *nm = cnp->cn_nameptr;
1094     int len = cnp->cn_namelen;
1095     struct cnode *tp;
1096 
1097     MARK_ENTRY(CODA_REMOVE_STATS);
1098 
1099     CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1100 	nm, coda_f2s(&cp->c_fid)));)
1101 
1102     /* Remove the file's entry from the CODA Name Cache */
1103     /* We're being conservative here; it might be that this person
1104      * doesn't really have sufficient access to delete the file
1105      * but we feel zapping the entry won't really hurt anyone -- dcs
1106      */
1107     /* I'm gonna go out on a limb here. If a file and a hardlink to it
1108      * exist, and one is removed, the link count on the other will be
1109      * off by 1. We could either invalidate the attrs if cached, or
1110      * fix them. I'll try to fix them. DCS 11/8/94
1111      */
1112     tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1113     if (tp) {
1114 	if (VALID_VATTR(tp)) {	/* If attrs are cached */
1115 	    if (tp->c_vattr.va_nlink > 1) {	/* If it's a hard link */
1116 		tp->c_vattr.va_nlink--;
1117 	    }
1118 	}
1119 
1120 	coda_nc_zapfile(VTOC(dvp), nm, len);
1121 	/* No need to flush it if it doesn't exist! */
1122     }
1123     /* Invalidate the parent's attr cache, the modification time has changed */
1124     VTOC(dvp)->c_flags &= ~C_VATTR;
1125 
1126     /* Check for remove of control object. */
1127     if (IS_CTL_NAME(dvp, nm, len)) {
1128 	MARK_INT_FAIL(CODA_REMOVE_STATS);
1129 	return(ENOENT);
1130     }
1131 
1132     error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1133 
1134     CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1135 
1136     /*
1137      * Unlock parent and child (avoiding double if ".").
1138      */
1139     if (dvp == vp) {
1140 	vrele(vp);
1141     } else {
1142 	vput(vp);
1143     }
1144     vput(dvp);
1145 
1146     return(error);
1147 }
1148 
1149 /*
1150  * dvp is the directory where the link is to go, and is locked.
1151  * vp is the object to be linked to, and is unlocked.
1152  * At exit, we must unlock dvp, and vput dvp.
1153  */
1154 int
1155 coda_link(void *v)
1156 {
1157 /* true args */
1158     struct vop_link_args *ap = v;
1159     vnode_t *vp = ap->a_vp;
1160     struct cnode *cp = VTOC(vp);
1161     vnode_t *dvp = ap->a_dvp;
1162     struct cnode *dcp = VTOC(dvp);
1163     struct componentname *cnp = ap->a_cnp;
1164     kauth_cred_t cred = cnp->cn_cred;
1165     struct lwp *l = curlwp;
1166 /* locals */
1167     int error;
1168     const char *nm = cnp->cn_nameptr;
1169     int len = cnp->cn_namelen;
1170 
1171     MARK_ENTRY(CODA_LINK_STATS);
1172 
1173     if (codadebug & CODADBGMSK(CODA_LINK)) {
1174 	myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1175 	myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1176     }
1184 
1185     /* Check for link to/from control object. */
1186     if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1187 	MARK_INT_FAIL(CODA_LINK_STATS);
1188 	return(EACCES);
1189     }
1190 
1191     /* If linking . to a name, error out earlier. */
1192     if (vp == dvp) {
1193 #ifdef CODA_VERBOSE
1194         printf("%s: vp == dvp\n", __func__);
1195 #endif
1196 	error = EISDIR;
1197 	goto exit;
1198     }
1199 
1200     /* XXX Why does venus_link need the vnode to be locked?*/
1201     if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1202 #ifdef CODA_VERBOSE
1203 	printf("%s: couldn't lock vnode %p\n", __func__, vp);
1204 #endif
1205 	error = EFAULT;		/* XXX better value */
1206 	goto exit;
1207     }
1208     error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1209     VOP_UNLOCK(vp);
1210 
1211     /* Invalidate parent's attr cache (the modification time has changed). */
1212     VTOC(dvp)->c_flags &= ~C_VATTR;
1213     /* Invalidate child's attr cache (XXX why). */
1214     VTOC(vp)->c_flags &= ~C_VATTR;
1215 
1216     CODADEBUG(CODA_LINK,	myprintf(("in link result %d\n",error)); )
1217 
1218 exit:
1219     vput(dvp);
1220     return(error);
1221 }
1222 
1223 int
1224 coda_rename(void *v)
1225 {
1226 /* true args */
1227     struct vop_rename_args *ap = v;
1228     vnode_t *odvp = ap->a_fdvp;
1229     struct cnode *odcp = VTOC(odvp);
1230     struct componentname  *fcnp = ap->a_fcnp;
1231     vnode_t *ndvp = ap->a_tdvp;
1232     struct cnode *ndcp = VTOC(ndvp);
1233     struct componentname  *tcnp = ap->a_tcnp;
1234     kauth_cred_t cred = fcnp->cn_cred;
1235     struct lwp *l = curlwp;
1236 /* true args */
1237     int error;
1238     const char *fnm = fcnp->cn_nameptr;
1239     int flen = fcnp->cn_namelen;
1240     const char *tnm = tcnp->cn_nameptr;
1241     int tlen = tcnp->cn_namelen;
1242 
1243     MARK_ENTRY(CODA_RENAME_STATS);
1244 
1245     /* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
1246        This could be Bad. XXX */
1247 #ifdef OLD_DIAGNOSTIC
1248     if ((fcnp->cn_cred != tcnp->cn_cred)
1249 	|| (fcnp->cn_lwp != tcnp->cn_lwp))
1250     {
1251 	panic("%s: component names don't agree", __func__);
1252     }
1253 #endif
1254 
1255     /* Check for rename involving control object. */
1256     if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1257 	MARK_INT_FAIL(CODA_RENAME_STATS);
1258 	return(EACCES);
1259     }
1260 
1261     /* Problem with moving directories -- need to flush entry for .. */
1262     if (odvp != ndvp) {
1263 	struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1264 	if (ovcp) {
1265 	    vnode_t *ovp = CTOV(ovcp);
1266 	    if ((ovp) &&
1267 		(ovp->v_type == VDIR)) /* If it's a directory */
1268 		coda_nc_zapfile(VTOC(ovp),"..", 2);
1269 	}
1270     }
1271 
1272     /* Remove the entries for both source and target files */
1273     coda_nc_zapfile(VTOC(odvp), fnm, flen);
1274     coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1275 
1276     /* Invalidate the parent's attr cache, the modification time has changed */
1277     VTOC(odvp)->c_flags &= ~C_VATTR;
1278     VTOC(ndvp)->c_flags &= ~C_VATTR;
1279 
1280     if (flen+1 > CODA_MAXNAMLEN) {
1281 	MARK_INT_FAIL(CODA_RENAME_STATS);
1282 	error = EINVAL;
1283 	goto exit;
1284     }
1285 
1286     if (tlen+1 > CODA_MAXNAMLEN) {
1287 	MARK_INT_FAIL(CODA_RENAME_STATS);
1288 	error = EINVAL;
1289 	goto exit;
1290     }
1291 
1292     error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1293 
1294  exit:
1295     CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1296     /* XXX - do we need to call cache_purge on the moved vnode? */
1297     cache_purge(ap->a_fvp);
1298 
1299     /* It seems to be incumbent on us to drop locks on all four vnodes */
1300     /* From-vnodes are not locked, only ref'd.  To-vnodes are locked. */
1301 
1302     vrele(ap->a_fvp);
1303     vrele(odvp);
1304 
1305     if (ap->a_tvp) {
1306 	if (ap->a_tvp == ndvp) {
1307 	    vrele(ap->a_tvp);
1308 	} else {
1309 	    vput(ap->a_tvp);
1310 	}
1311     }
1312 
1313     vput(ndvp);
1314     return(error);
1315 }
1316 
1317 int
1318 coda_mkdir(void *v)
1319 {
1320 /* true args */
1321     struct vop_mkdir_v3_args *ap = v;
1322     vnode_t *dvp = ap->a_dvp;
1323     struct cnode *dcp = VTOC(dvp);
1324     struct componentname  *cnp = ap->a_cnp;
1325     struct vattr *va = ap->a_vap;
1326     vnode_t **vpp = ap->a_vpp;
1327     kauth_cred_t cred = cnp->cn_cred;
1328     struct lwp *l = curlwp;
1329 /* locals */
1330     int error;
1331     const char *nm = cnp->cn_nameptr;
1332     int len = cnp->cn_namelen;
1333     struct cnode *cp;
1334     CodaFid VFid;
1335     struct vattr ova;
1336 
1337     MARK_ENTRY(CODA_MKDIR_STATS);
1338 
1339     /* Check for mkdir of control object. */
1340     if (IS_CTL_NAME(dvp, nm, len)) {
1341 	*vpp = (vnode_t *)0;
1342 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1343 	return(EACCES);
1344     }
1345 
1346     if (len+1 > CODA_MAXNAMLEN) {
1347 	*vpp = (vnode_t *)0;
1348 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1349 	return(EACCES);
1350     }
1351 
1352     error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1353 
1354     if (!error) {
1355 	if (coda_find(&VFid) != NULL)
1356 	    panic("cnode existed for newly created directory!");
1357 
1358 
1359 	cp =  make_coda_node(&VFid, dvp->v_mount, va->va_type);
1360 	*vpp = CTOV(cp);
1361 
1362 	/* enter the new vnode in the Name Cache */
1363 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1364 
1365 	/* as a side effect, enter "." and ".." for the directory */
1366 	coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1367 	coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1368 
1369 	if (coda_attr_cache) {
1370 	    VTOC(*vpp)->c_vattr = ova;		/* update the attr cache */
1371 	    VTOC(*vpp)->c_flags |= C_VATTR;	/* Valid attributes in cnode */
1372 	}
1373 
1374 	/* Invalidate the parent's attr cache, the modification time has changed */
1375 	VTOC(dvp)->c_flags &= ~C_VATTR;
1376 
1377 	CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1378 	    coda_f2s(&VFid), error)); )
1379     } else {
1380 	*vpp = (vnode_t *)0;
1381 	CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1382     }
1383 
1384     return(error);
1385 }
1386 
1387 int
1388 coda_rmdir(void *v)
1389 {
1390 /* true args */
1391     struct vop_rmdir_args *ap = v;
1392     vnode_t *dvp = ap->a_dvp;
1393     struct cnode *dcp = VTOC(dvp);
1394     vnode_t *vp = ap->a_vp;
1395     struct componentname  *cnp = ap->a_cnp;
1396     kauth_cred_t cred = cnp->cn_cred;
1397     struct lwp *l = curlwp;
1398 /* true args */
1399     int error;
1400     const char *nm = cnp->cn_nameptr;
1401     int len = cnp->cn_namelen;
1402     struct cnode *cp;
1403 
1404     MARK_ENTRY(CODA_RMDIR_STATS);
1405 
1406     /* Check for rmdir of control object. */
1407     if (IS_CTL_NAME(dvp, nm, len)) {
1408 	MARK_INT_FAIL(CODA_RMDIR_STATS);
1409 	return(ENOENT);
1410     }
1411 
1412     /* Can't remove . in self. */
1413     if (dvp == vp) {
1414 #ifdef CODA_VERBOSE
1415 	printf("%s: dvp == vp\n", __func__);
1416 #endif
1417 	error = EINVAL;
1418 	goto exit;
1419     }
1420 
1421     /*
1422      * The caller may not have adequate permissions, and the venus
1423      * operation may fail, but it doesn't hurt from a correctness
1424      * viewpoint to invalidate cache entries.
1425      * XXX Why isn't this done after the venus_rmdir call?
1426      */
1427     /* Look up child in name cache (by name, from parent). */
1428     cp = coda_nc_lookup(dcp, nm, len, cred);
1429     /* If found, remove all children of the child (., ..). */
1430     if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1431 
1432     /* Remove child's own entry. */
1433     coda_nc_zapfile(dcp, nm, len);
1434 
1435     /* Invalidate parent's attr cache (the modification time has changed). */
1436     dcp->c_flags &= ~C_VATTR;
1437 
1438     error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1439 
1440     CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1441 
1442 exit:
1443     /* vput both vnodes */
1444     vput(dvp);
1445     if (dvp == vp) {
1446 	vrele(vp);
1447     } else {
1448 	vput(vp);
1449     }
1450 
1451     return(error);
1452 }
1453 
1454 int
1455 coda_symlink(void *v)
1456 {
1457 /* true args */
1458     struct vop_symlink_v3_args *ap = v;
1459     vnode_t *dvp = ap->a_dvp;
1460     struct cnode *dcp = VTOC(dvp);
1461     /* a_vpp is used in place below */
1462     struct componentname *cnp = ap->a_cnp;
1463     struct vattr *tva = ap->a_vap;
1464     char *path = ap->a_target;
1465     kauth_cred_t cred = cnp->cn_cred;
1466     struct lwp *l = curlwp;
1467 /* locals */
1468     int error;
1469     u_long saved_cn_flags;
1470     const char *nm = cnp->cn_nameptr;
1471     int len = cnp->cn_namelen;
1472     int plen = strlen(path);
1473 
1474     /*
1475      * Here's the strategy for the moment: perform the symlink, then
1476      * do a lookup to grab the resulting vnode.  I know this requires
1477      * two communications with Venus for a new symbolic link, but
1478      * that's the way the ball bounces.  I don't yet want to change
1479      * the way the Mach symlink works.  When Mach support is
1480      * deprecated, we should change symlink so that the common case
1481      * returns the resultant vnode in a vpp argument.
1482      */
1483 
1484     MARK_ENTRY(CODA_SYMLINK_STATS);
1485 
1486     /* Check for symlink of control object. */
1487     if (IS_CTL_NAME(dvp, nm, len)) {
1488 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1489 	error = EACCES;
1490 	goto exit;
1491     }
1492 
1493     if (plen+1 > CODA_MAXPATHLEN) {
1494 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1495 	error = EINVAL;
1496 	goto exit;
1497     }
1498 
1499     if (len+1 > CODA_MAXNAMLEN) {
1500 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1501 	error = EINVAL;
1502 	goto exit;
1503     }
1504 
1505     error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1506 
1507     /* Invalidate the parent's attr cache (modification time has changed). */
1508     dcp->c_flags &= ~C_VATTR;
1509 
1510     if (!error) {
1511 	/*
1512 	 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1513 	 * these are defined only for VOP_LOOKUP.   We desire to reuse
1514 	 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1515 	 * stray flags passed to us.  Such stray flags can occur because
1516 	 * sys_symlink makes a namei call and then reuses the
1517 	 * componentname structure.
1518 	 */
1519 	/*
1520 	 * XXX Arguably we should create our own componentname structure
1521 	 * and not reuse the one that was passed in.
1522 	 */
1523 	saved_cn_flags = cnp->cn_flags;
1524 	cnp->cn_flags &= ~(MODMASK | OPMASK);
1525 	cnp->cn_flags |= LOOKUP;
1526 	error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1527 	cnp->cn_flags = saved_cn_flags;
1528     }
1529 
1530  exit:
1531     CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1532     return(error);
1533 }
1534 
1535 /*
1536  * Read directory entries.
1537  */
1538 int
1539 coda_readdir(void *v)
1540 {
1541 /* true args */
1542     struct vop_readdir_args *ap = v;
1543     vnode_t *vp = ap->a_vp;
1544     struct cnode *cp = VTOC(vp);
1545     struct uio *uiop = ap->a_uio;
1546     kauth_cred_t cred = ap->a_cred;
1547     int *eofflag = ap->a_eofflag;
1548     off_t **cookies = ap->a_cookies;
1549     int *ncookies = ap->a_ncookies;
1550 /* upcall decl */
1551 /* locals */
1552     int error = 0;
1553 
1554     MARK_ENTRY(CODA_READDIR_STATS);
1555 
1556     CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1557 	uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1558 	(long long) uiop->uio_offset)); )
1559 
1560     /* Check for readdir of control object. */
1561     if (IS_CTL_VP(vp)) {
1562 	MARK_INT_FAIL(CODA_READDIR_STATS);
1563 	return(ENOENT);
1564     }
1565 
1566     {
1567 	/* Redirect the request to UFS. */
1568 
1569 	/* If directory is not already open do an "internal open" on it. */
1570 	int opened_internally = 0;
1571 	if (cp->c_ovp == NULL) {
1572 	    opened_internally = 1;
1573 	    MARK_INT_GEN(CODA_OPEN_STATS);
1574 	    error = VOP_OPEN(vp, FREAD, cred);
1575 #ifdef	CODA_VERBOSE
1576 	    printf("%s: Internally Opening %p\n", __func__, vp);
1577 #endif
1578 	    if (error) return(error);
1579 	} else
1580 	    vp = cp->c_ovp;
1581 
1582 	/* Have UFS handle the call. */
1583 	CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1584 	    __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1585 	error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1586 	if (error)
1587 	    MARK_INT_FAIL(CODA_READDIR_STATS);
1588 	else
1589 	    MARK_INT_SAT(CODA_READDIR_STATS);
1590 
1591 	/* Do an "internal close" if necessary. */
1592 	if (opened_internally) {
1593 	    MARK_INT_GEN(CODA_CLOSE_STATS);
1594 	    (void)VOP_CLOSE(vp, FREAD, cred);
1595 	}
1596     }
1597 
1598     return(error);
1599 }
1600 
1601 /*
1602  * Convert from file system blocks to device blocks
1603  */
1604 int
1605 coda_bmap(void *v)
1606 {
1607     /* XXX on the global proc */
1608 /* true args */
1609     struct vop_bmap_args *ap = v;
1610     vnode_t *vp __unused = ap->a_vp;	/* file's vnode */
1611     daddr_t bn __unused = ap->a_bn;	/* fs block number */
1612     vnode_t **vpp = ap->a_vpp;			/* RETURN vp of device */
1613     daddr_t *bnp __unused = ap->a_bnp;	/* RETURN device block number */
1614     struct lwp *l __unused = curlwp;
1615 /* upcall decl */
1616 /* locals */
1617 
1618 	*vpp = (vnode_t *)0;
1619 	myprintf(("coda_bmap called!\n"));
1620 	return(EINVAL);
1621 }
1622 
1623 /*
1624  * I don't think the following two things are used anywhere, so I've
1625  * commented them out
1626  *
1627  * struct buf *async_bufhead;
1628  * int async_daemon_count;
1629  */
1630 int
1631 coda_strategy(void *v)
1632 {
1633 /* true args */
1634     struct vop_strategy_args *ap = v;
1635     struct buf *bp __unused = ap->a_bp;
1636     struct lwp *l __unused = curlwp;
1637 /* upcall decl */
1638 /* locals */
1639 
1640 	myprintf(("coda_strategy called!  "));
1641 	return(EINVAL);
1642 }
1643 
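/*
 * Release the cnode when the vnode is reclaimed.  The cnode has
 * normally already been removed from the fid table by coda_unsave()
 * in coda_inactive().
 */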
1644 int
1645 coda_reclaim(void *v)
1646 {
1647 /* true args */
1648     struct vop_reclaim_args *ap = v;
1649     vnode_t *vp = ap->a_vp;
1650     struct cnode *cp = VTOC(vp);
1651 /* upcall decl */
1652 /* locals */
1653 
1654 /*
1655  * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1656  */
1657     ENTRY;
1658 
1659     if (IS_UNMOUNTING(cp)) {
1660 #ifdef	DEBUG
1661 	if (VTOC(vp)->c_ovp) {
1662 	    if (IS_UNMOUNTING(cp))
1663 		printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1664 	}
1665 #endif
1666     } else {
1667 #ifdef OLD_DIAGNOSTIC
1668 	if (vp->v_usecount != 0)
1669 	    printf("%s: pushing active %p\n", __func__, vp);
1670 	if (VTOC(vp)->c_ovp) {
1671 	    panic("%s: c_ovp not void", __func__);
1672 	}
1673 #endif
1674     }
1675     coda_free(VTOC(vp));
1676     SET_VTOC(vp) = NULL;
1677     return (0);
1678 }
1679 
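/*
 * coda_lock and coda_unlock delegate to genfs_lock/genfs_unlock,
 * optionally logging the fid when coda_lockdebug is set;
 * coda_islocked is just genfs_islocked.
 */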
1680 int
1681 coda_lock(void *v)
1682 {
1683 /* true args */
1684     struct vop_lock_args *ap = v;
1685     vnode_t *vp = ap->a_vp;
1686     struct cnode *cp = VTOC(vp);
1687 /* upcall decl */
1688 /* locals */
1689 
1690     ENTRY;
1691 
1692     if (coda_lockdebug) {
1693 	myprintf(("Attempting lock on %s\n",
1694 		  coda_f2s(&cp->c_fid)));
1695     }
1696 
1697     return genfs_lock(v);
1698 }
1699 
1700 int
1701 coda_unlock(void *v)
1702 {
1703 /* true args */
1704     struct vop_unlock_args *ap = v;
1705     vnode_t *vp = ap->a_vp;
1706     struct cnode *cp = VTOC(vp);
1707 /* upcall decl */
1708 /* locals */
1709 
1710     ENTRY;
1711     if (coda_lockdebug) {
1712 	myprintf(("Attempting unlock on %s\n",
1713 		  coda_f2s(&cp->c_fid)));
1714     }
1715 
1716     return genfs_unlock(v);
1717 }
1718 
1719 int
1720 coda_islocked(void *v)
1721 {
1722 /* true args */
1723     ENTRY;
1724 
1725     return genfs_islocked(v);
1726 }
1727 
1728 /*
1729  * Given a device and inode, obtain a locked vnode.  One reference is
1730  * obtained and passed back to the caller.
1731  */
1732 int
1733 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1734 {
1735     int           error;
1736     struct mount *mp;
1737 
1738     /* Obtain mount point structure from device. */
1739     if (!(mp = devtomp(dev))) {
1740 	myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1741 	    (unsigned long long)dev));
1742 	return(ENXIO);
1743     }
1744 
1745     /*
1746      * Obtain vnode from mount point and inode.
1747      */
1748     error = VFS_VGET(mp, ino, vpp);
1749     if (error) {
1750 	myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1751 	    (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1752 	return(ENOENT);
1753     }
1754     /* share the underlying vnode lock with the coda vnode */
1755     mutex_obj_hold((*vpp)->v_interlock);
1756     uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1757     KASSERT(VOP_ISLOCKED(*vpp));
1758     return(0);
1759 }
1760 
1761 static void
1762 coda_print_vattr(struct vattr *attr)
1763 {
1764     const char *typestr;
1765 
1766     switch (attr->va_type) {
1767     case VNON:
1768 	typestr = "VNON";
1769 	break;
1770     case VREG:
1771 	typestr = "VREG";
1772 	break;
1773     case VDIR:
1774 	typestr = "VDIR";
1775 	break;
1776     case VBLK:
1777 	typestr = "VBLK";
1778 	break;
1779     case VCHR:
1780 	typestr = "VCHR";
1781 	break;
1782     case VLNK:
1783 	typestr = "VLNK";
1784 	break;
1785     case VSOCK:
1786 	typestr = "VSCK";
1787 	break;
1788     case VFIFO:
1789 	typestr = "VFFO";
1790 	break;
1791     case VBAD:
1792 	typestr = "VBAD";
1793 	break;
1794     default:
1795 	typestr = "????";
1796 	break;
1797     }
1798 
1799 
1800     myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1801 	      typestr, (int)attr->va_mode, (int)attr->va_uid,
1802 	      (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1803 
1804     myprintf(("      fileid %d nlink %d size %d blocksize %d bytes %d\n",
1805 	      (int)attr->va_fileid, (int)attr->va_nlink,
1806 	      (int)attr->va_size,
1807 	      (int)attr->va_blocksize,(int)attr->va_bytes));
1808     myprintf(("      gen %ld flags %ld vaflags %d\n",
1809 	      attr->va_gen, attr->va_flags, attr->va_vaflags));
1810     myprintf(("      atime sec %d nsec %d\n",
1811 	      (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1812     myprintf(("      mtime sec %d nsec %d\n",
1813 	      (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1814     myprintf(("      ctime sec %d nsec %d\n",
1815 	      (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1816 }
1817 
1818 /*
1819  * Return a vnode for the given fid.
1820  * If no cnode exists for this fid create one and put it
1821  * in a table hashed by coda_f2i().  If the cnode for
1822  * this fid is already in the table return it (ref count is
1823  * incremented by coda_find).  The cnode will be flushed from the
1824  * table when coda_inactive calls coda_unsave.
1825  */
1826 struct cnode *
1827 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1828 {
1829     struct cnode *cp;
1830     int          error;
1831 
1832     if ((cp = coda_find(fid)) == NULL) {
1833 	vnode_t *vp;
1834 
1835 	cp = coda_alloc();
1836 	cp->c_fid = *fid;
1837 
1838 	error = getnewvnode(VT_CODA, fvsp, coda_vnodeop_p, NULL, &vp);
1839 	if (error) {
1840 	    panic("%s: getnewvnode returned error %d", __func__, error);
1841 	}
1842 	vp->v_data = cp;
1843 	vp->v_type = type;
1844 	cp->c_vnode = vp;
1845 	uvm_vnp_setsize(vp, 0);
1846 	coda_save(cp);
1847 
1848     } else {
1849 	vref(CTOV(cp));
1850     }
1851 
1852     return cp;
1853 }
1854 
1855 /*
1856  * coda_getpages may be called on a vnode which has not been opened,
1857  * e.g. to fault in pages to execute a program.  In that case, we must
1858  * open the file to get the container.  The vnode may or may not be
1859  * locked, and we must leave it in the same state.
1860  */
1861 int
1862 coda_getpages(void *v)
1863 {
1864 	struct vop_getpages_args /* {
1865 		vnode_t *a_vp;
1866 		voff_t a_offset;
1867 		struct vm_page **a_m;
1868 		int *a_count;
1869 		int a_centeridx;
1870 		vm_prot_t a_access_type;
1871 		int a_advice;
1872 		int a_flags;
1873 	} */ *ap = v;
1874 	vnode_t *vp = ap->a_vp, *cvp;
1875 	struct cnode *cp = VTOC(vp);
1876 	struct lwp *l = curlwp;
1877 	kauth_cred_t cred = l->l_cred;
1878 	int error, cerror;
1879 	int waslocked;	       /* 1 if vnode lock was held on entry */
1880 	int didopen = 0;	/* 1 if we opened container file */
1881 
1882 	/*
1883 	 * Handle a case that uvm_fault doesn't quite use yet.
1884 	 * See layer_vnops.c for inspiration.
1885 	 */
1886 	if (ap->a_flags & PGO_LOCKED) {
1887 		return EBUSY;
1888 	}
1889 
1890 	KASSERT(mutex_owned(vp->v_interlock));
1891 
1892 	/* Check for control object. */
1893 	if (IS_CTL_VP(vp)) {
1894 #ifdef CODA_VERBOSE
1895 		printf("%s: control object %p\n", __func__, vp);
1896 #endif
1897 		return(EINVAL);
1898 	}
1899 
1900 	/*
1901 	 * XXX It's really not ok to be releasing the lock we get,
1902 	 * because we could be overlapping with another call to
1903 	 * getpages and drop a lock they are relying on.  We need to
1904 	 * figure out whether getpages ever is called holding the
1905 	 * lock, and if we should serialize getpages calls by some
1906 	 * mechanism.
1907 	 */
1908 	/* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1909 	waslocked = VOP_ISLOCKED(vp);
1910 
1911 	/* Get container file if not already present. */
1912 	cvp = cp->c_ovp;
1913 	if (cvp == NULL) {
1914 		/*
1915 		 * VOP_OPEN requires a locked vnode.  We must avoid
1916 		 * locking the vnode if it is already locked, and
1917 		 * leave it in the same state on exit.
1918 		 */
1919 		if (waslocked == 0) {
1920 			mutex_exit(vp->v_interlock);
1921 			cerror = vn_lock(vp, LK_EXCLUSIVE);
1922 			if (cerror) {
1923 #ifdef CODA_VERBOSE
1924 				printf("%s: can't lock vnode %p\n",
1925 				    __func__, vp);
1926 #endif
1927 				return cerror;
1928 			}
1929 #ifdef CODA_VERBOSE
1930 			printf("%s: locked vnode %p\n", __func__, vp);
1931 #endif
1932 		}
1933 
1934 		/*
1935 		 * Open file (causes upcall to venus).
1936 		 * XXX Perhaps we should not fully open the file, but
1937 		 * simply obtain a container file.
1938 		 */
1939 		/* XXX Is it ok to do this while holding the mutex? */
1940 		cerror = VOP_OPEN(vp, FREAD, cred);
1941 
1942 		if (cerror) {
1943 #ifdef CODA_VERBOSE
1944 			printf("%s: cannot open vnode %p => %d\n", __func__,
1945 			    vp, cerror);
1946 #endif
1947 			if (waslocked == 0)
1948 				VOP_UNLOCK(vp);
1949 			return cerror;
1950 		}
1951 
1952 #ifdef CODA_VERBOSE
1953 		printf("%s: opened vnode %p\n", __func__, vp);
1954 #endif
1955 		cvp = cp->c_ovp;
1956 		didopen = 1;
1957 		if (waslocked == 0)
1958 			mutex_enter(vp->v_interlock);
1959 	}
1960 	KASSERT(cvp != NULL);
1961 
1962 	/* Munge the arg structure to refer to the container vnode. */
1963 	KASSERT(cvp->v_interlock == vp->v_interlock);
1964 	ap->a_vp = cp->c_ovp;
1965 
1966 	/* Finally, call getpages on it. */
1967 	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
1968 
1969 	/* If we opened the vnode, we must close it. */
1970 	if (didopen) {
1971 		/*
1972 		 * VOP_CLOSE requires a locked vnode, but we are still
1973 		 * holding the lock (or riding a caller's lock).
1974 		 */
1975 		cerror = VOP_CLOSE(vp, FREAD, cred);
1976 #ifdef CODA_VERBOSE
1977 		if (cerror != 0)
1978 			/* XXX How should we handle this? */
1979 			printf("%s: closed vnode %p -> %d\n", __func__,
1980 			    vp, cerror);
1981 #endif
1982 
1983 		/* If we obtained a lock, drop it. */
1984 		if (waslocked == 0)
1985 			VOP_UNLOCK(vp);
1986 	}
1987 
1988 	return error;
1989 }
1990 
1991 /*
1992  * The protocol requires v_interlock to be held by the caller.
1993  */
1994 int
1995 coda_putpages(void *v)
1996 {
1997 	struct vop_putpages_args /* {
1998 		vnode_t *a_vp;
1999 		voff_t a_offlo;
2000 		voff_t a_offhi;
2001 		int a_flags;
2002 	} */ *ap = v;
2003 	vnode_t *vp = ap->a_vp, *cvp;
2004 	struct cnode *cp = VTOC(vp);
2005 	int error;
2006 
2007 	KASSERT(mutex_owned(vp->v_interlock));
2008 
2009 	/* Check for control object. */
2010 	if (IS_CTL_VP(vp)) {
2011 		mutex_exit(vp->v_interlock);
2012 #ifdef CODA_VERBOSE
2013 		printf("%s: control object %p\n", __func__, vp);
2014 #endif
2015 		return(EINVAL);
2016 	}
2017 
2018 	/*
2019 	 * If container object is not present, then there are no pages
2020 	 * to put; just return without error.  This happens all the
2021 	 * time, apparently during discard of a closed vnode (which
2022 	 * trivially can't have dirty pages).
2023 	 */
2024 	cvp = cp->c_ovp;
2025 	if (cvp == NULL) {
2026 		mutex_exit(vp->v_interlock);
2027 		return 0;
2028 	}
2029 
2030 	/* Munge the arg structure to refer to the container vnode. */
2031 	KASSERT(cvp->v_interlock == vp->v_interlock);
2032 	ap->a_vp = cvp;
2033 
2034 	/* Finally, call putpages on it. */
2035 	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2036 
2037 	return error;
2038 }
2039