1 /* $NetBSD: coda_vnops.c,v 1.106 2017/05/26 14:21:00 riastradh Exp $ */ 2 3 /* 4 * 5 * Coda: an Experimental Distributed File System 6 * Release 3.1 7 * 8 * Copyright (c) 1987-1998 Carnegie Mellon University 9 * All Rights Reserved 10 * 11 * Permission to use, copy, modify and distribute this software and its 12 * documentation is hereby granted, provided that both the copyright 13 * notice and this permission notice appear in all copies of the 14 * software, derivative works or modified versions, and any portions 15 * thereof, and that both notices appear in supporting documentation, and 16 * that credit is given to Carnegie Mellon University in all documents 17 * and publicity pertaining to direct or indirect use of this code or its 18 * derivatives. 19 * 20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, 21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS 22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON 23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF 25 * ANY DERIVATIVE WORK. 26 * 27 * Carnegie Mellon encourages users of this software to return any 28 * improvements or extensions that they make, and to grant Carnegie 29 * Mellon the rights to redistribute these changes without encumbrance. 30 * 31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $ 32 */ 33 34 /* 35 * Mach Operating System 36 * Copyright (c) 1990 Carnegie-Mellon University 37 * Copyright (c) 1989 Carnegie-Mellon University 38 * All rights reserved. The CMU software License Agreement specifies 39 * the terms and conditions for use and redistribution. 40 */ 41 42 /* 43 * This code was written for the Coda file system at Carnegie Mellon 44 * University. Contributers include David Steere, James Kistler, and 45 * M. Satyanarayanan. 46 */ 47 48 #include <sys/cdefs.h> 49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.106 2017/05/26 14:21:00 riastradh Exp $"); 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/malloc.h> 54 #include <sys/errno.h> 55 #include <sys/acct.h> 56 #include <sys/file.h> 57 #include <sys/uio.h> 58 #include <sys/namei.h> 59 #include <sys/ioctl.h> 60 #include <sys/mount.h> 61 #include <sys/proc.h> 62 #include <sys/select.h> 63 #include <sys/vnode.h> 64 #include <sys/kauth.h> 65 66 #include <miscfs/genfs/genfs.h> 67 #include <miscfs/specfs/specdev.h> 68 69 #include <coda/coda.h> 70 #include <coda/cnode.h> 71 #include <coda/coda_vnops.h> 72 #include <coda/coda_venus.h> 73 #include <coda/coda_opstats.h> 74 #include <coda/coda_subr.h> 75 #include <coda/coda_namecache.h> 76 #include <coda/coda_pioctl.h> 77 78 /* 79 * These flags select various performance enhancements. 
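 * Each defaults to on; clearing a flag disables the corresponding
 * kernel-side cache (attributes, symlink contents, access checks).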
80 */ 81 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */ 82 int coda_symlink_cache = 1; /* Set to cache symbolic link information */ 83 int coda_access_cache = 1; /* Set to handle some access checks directly */ 84 85 /* structure to keep track of vfs calls */ 86 87 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE]; 88 89 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++) 90 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++) 91 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++) 92 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++) 93 94 /* What we are delaying for in printf */ 95 static int coda_lockdebug = 0; 96 97 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__)) 98 99 /* Definition of the vnode operation vector */ 100 101 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = { 102 { &vop_default_desc, coda_vop_error }, 103 { &vop_lookup_desc, coda_lookup }, /* lookup */ 104 { &vop_create_desc, coda_create }, /* create */ 105 { &vop_mknod_desc, coda_vop_error }, /* mknod */ 106 { &vop_open_desc, coda_open }, /* open */ 107 { &vop_close_desc, coda_close }, /* close */ 108 { &vop_access_desc, coda_access }, /* access */ 109 { &vop_getattr_desc, coda_getattr }, /* getattr */ 110 { &vop_setattr_desc, coda_setattr }, /* setattr */ 111 { &vop_read_desc, coda_read }, /* read */ 112 { &vop_write_desc, coda_write }, /* write */ 113 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */ 114 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */ 115 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 116 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */ 117 { &vop_mmap_desc, genfs_mmap }, /* mmap */ 118 { &vop_fsync_desc, coda_fsync }, /* fsync */ 119 { &vop_remove_desc, coda_remove }, /* remove */ 120 { &vop_link_desc, coda_link }, /* link */ 121 { &vop_rename_desc, coda_rename }, /* rename */ 122 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */ 123 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */ 124 { &vop_symlink_desc, coda_symlink }, /* symlink */ 125 { &vop_readdir_desc, coda_readdir }, /* readdir */ 126 { &vop_readlink_desc, coda_readlink }, /* readlink */ 127 { &vop_abortop_desc, coda_abortop }, /* abortop */ 128 { &vop_inactive_desc, coda_inactive }, /* inactive */ 129 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */ 130 { &vop_lock_desc, coda_lock }, /* lock */ 131 { &vop_unlock_desc, coda_unlock }, /* unlock */ 132 { &vop_bmap_desc, coda_bmap }, /* bmap */ 133 { &vop_strategy_desc, coda_strategy }, /* strategy */ 134 { &vop_print_desc, coda_vop_error }, /* print */ 135 { &vop_islocked_desc, coda_islocked }, /* islocked */ 136 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */ 137 { &vop_advlock_desc, coda_vop_nop }, /* advlock */ 138 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */ 139 { &vop_seek_desc, genfs_seek }, /* seek */ 140 { &vop_poll_desc, genfs_poll }, /* poll */ 141 { &vop_getpages_desc, coda_getpages }, /* getpages */ 142 { &vop_putpages_desc, coda_putpages }, /* putpages */ 143 { NULL, NULL } 144 }; 145 146 static void coda_print_vattr(struct vattr *); 147 148 int (**coda_vnodeop_p)(void *); 149 const struct vnodeopv_desc coda_vnodeop_opv_desc = 150 { &coda_vnodeop_p, coda_vnodeop_entries }; 151 152 /* Definitions of NetBSD vnodeop interfaces */ 153 154 /* 155 * A generic error routine. Return EIO without looking at arguments. 
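 * Used as the default entry in coda_vnodeop_entries and for operations
 * Coda does not implement (mknod, print, pathconf, bwrite).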
156 */ 157 int 158 coda_vop_error(void *anon) { 159 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; 160 161 if (codadebug) { 162 myprintf(("%s: Vnode operation %s called (error).\n", 163 __func__, (*desc)->vdesc_name)); 164 } 165 166 return EIO; 167 } 168 169 /* A generic do-nothing. */ 170 int 171 coda_vop_nop(void *anon) { 172 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; 173 174 if (codadebug) { 175 myprintf(("Vnode operation %s called, but unsupported\n", 176 (*desc)->vdesc_name)); 177 } 178 return (0); 179 } 180 181 int 182 coda_vnodeopstats_init(void) 183 { 184 int i; 185 186 for(i=0;i<CODA_VNODEOPS_SIZE;i++) { 187 coda_vnodeopstats[i].opcode = i; 188 coda_vnodeopstats[i].entries = 0; 189 coda_vnodeopstats[i].sat_intrn = 0; 190 coda_vnodeopstats[i].unsat_intrn = 0; 191 coda_vnodeopstats[i].gen_intrn = 0; 192 } 193 194 return 0; 195 } 196 197 /* 198 * XXX The entire relationship between VOP_OPEN and having a container 199 * file (via venus_open) needs to be reexamined. In particular, it's 200 * valid to open/mmap/close and then reference. Instead of doing 201 * VOP_OPEN when getpages needs a container, we should do the 202 * venus_open part, and record that the vnode has opened the container 203 * for getpages, and do the matching logical close on coda_inactive. 204 * Further, coda_rdwr needs a container file, and sometimes needs to 205 * do the equivalent of open (core dumps). 206 */ 207 /* 208 * coda_open calls Venus to return the device and inode of the 209 * container file, and then obtains a vnode for that file. The 210 * container vnode is stored in the coda vnode, and a reference is 211 * added for each open file. 212 */ 213 int 214 coda_open(void *v) 215 { 216 /* 217 * NetBSD can pass the O_EXCL flag in mode, even though the check 218 * has already happened. Venus defensively assumes that if open 219 * is passed the EXCL, it must be a bug. We strip the flag here. 220 */ 221 /* true args */ 222 struct vop_open_args *ap = v; 223 vnode_t *vp = ap->a_vp; 224 struct cnode *cp = VTOC(vp); 225 int flag = ap->a_mode & (~O_EXCL); 226 kauth_cred_t cred = ap->a_cred; 227 /* locals */ 228 int error; 229 dev_t dev; /* container file device, inode, vnode */ 230 ino_t inode; 231 vnode_t *container_vp; 232 233 MARK_ENTRY(CODA_OPEN_STATS); 234 235 KASSERT(VOP_ISLOCKED(vp)); 236 /* Check for open of control file. */ 237 if (IS_CTL_VP(vp)) { 238 /* if (WRITABLE(flag)) */ 239 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) { 240 MARK_INT_FAIL(CODA_OPEN_STATS); 241 return(EACCES); 242 } 243 MARK_INT_SAT(CODA_OPEN_STATS); 244 return(0); 245 } 246 247 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode); 248 if (error) 249 return (error); 250 if (!error) { 251 CODADEBUG(CODA_OPEN, myprintf(( 252 "%s: dev 0x%llx inode %llu result %d\n", __func__, 253 (unsigned long long)dev, (unsigned long long)inode, error));) 254 } 255 256 /* 257 * Obtain locked and referenced container vnode from container 258 * device/inode. 259 */ 260 error = coda_grab_vnode(vp, dev, inode, &container_vp); 261 if (error) 262 return (error); 263 264 /* Save the vnode pointer for the container file. */ 265 if (cp->c_ovp == NULL) { 266 cp->c_ovp = container_vp; 267 } else { 268 if (cp->c_ovp != container_vp) 269 /* 270 * Perhaps venus returned a different container, or 271 * something else went wrong. 272 */ 273 panic("%s: cp->c_ovp != container_vp", __func__); 274 } 275 cp->c_ocount++; 276 277 /* Flush the attribute cache if writing the file. 
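     * A write changes the size and times, so force coda_getattr to
     * refetch them from Venus.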
*/ 278 if (flag & FWRITE) { 279 cp->c_owrite++; 280 cp->c_flags &= ~C_VATTR; 281 } 282 283 /* 284 * Save the <device, inode> pair for the container file to speed 285 * up subsequent reads while closed (mmap, program execution). 286 * This is perhaps safe because venus will invalidate the node 287 * before changing the container file mapping. 288 */ 289 cp->c_device = dev; 290 cp->c_inode = inode; 291 292 /* Open the container file. */ 293 error = VOP_OPEN(container_vp, flag, cred); 294 /* 295 * Drop the lock on the container, after we have done VOP_OPEN 296 * (which requires a locked vnode). 297 */ 298 VOP_UNLOCK(container_vp); 299 return(error); 300 } 301 302 /* 303 * Close the cache file used for I/O and notify Venus. 304 */ 305 int 306 coda_close(void *v) 307 { 308 /* true args */ 309 struct vop_close_args *ap = v; 310 vnode_t *vp = ap->a_vp; 311 struct cnode *cp = VTOC(vp); 312 int flag = ap->a_fflag; 313 kauth_cred_t cred = ap->a_cred; 314 /* locals */ 315 int error; 316 317 MARK_ENTRY(CODA_CLOSE_STATS); 318 319 /* Check for close of control file. */ 320 if (IS_CTL_VP(vp)) { 321 MARK_INT_SAT(CODA_CLOSE_STATS); 322 return(0); 323 } 324 325 /* 326 * XXX The IS_UNMOUNTING part of this is very suspect. 327 */ 328 if (IS_UNMOUNTING(cp)) { 329 if (cp->c_ovp) { 330 #ifdef CODA_VERBOSE 331 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n", 332 __func__, vp->v_usecount, cp->c_ovp, vp, cp); 333 #endif 334 #ifdef hmm 335 vgone(cp->c_ovp); 336 #else 337 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 338 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */ 339 vput(cp->c_ovp); 340 #endif 341 } else { 342 #ifdef CODA_VERBOSE 343 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp); 344 #endif 345 } 346 return ENODEV; 347 } 348 349 /* Lock the container node, and VOP_CLOSE it. */ 350 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 351 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */ 352 /* 353 * Drop the lock we just obtained, and vrele the container vnode. 354 * Decrement reference counts, and clear container vnode pointer on 355 * last close. 356 */ 357 vput(cp->c_ovp); 358 if (flag & FWRITE) 359 --cp->c_owrite; 360 if (--cp->c_ocount == 0) 361 cp->c_ovp = NULL; 362 363 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp); 364 365 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); ) 366 return(error); 367 } 368 369 int 370 coda_read(void *v) 371 { 372 struct vop_read_args *ap = v; 373 374 ENTRY; 375 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ, 376 ap->a_ioflag, ap->a_cred, curlwp)); 377 } 378 379 int 380 coda_write(void *v) 381 { 382 struct vop_write_args *ap = v; 383 384 ENTRY; 385 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE, 386 ap->a_ioflag, ap->a_cred, curlwp)); 387 } 388 389 int 390 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag, 391 kauth_cred_t cred, struct lwp *l) 392 { 393 /* upcall decl */ 394 /* NOTE: container file operation!!! */ 395 /* locals */ 396 struct cnode *cp = VTOC(vp); 397 vnode_t *cfvp = cp->c_ovp; 398 struct proc *p = l->l_proc; 399 int opened_internally = 0; 400 int error = 0; 401 402 MARK_ENTRY(CODA_RDWR_STATS); 403 404 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw, 405 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 406 (long long) uiop->uio_offset)); ) 407 408 /* Check for rdwr of control object. */ 409 if (IS_CTL_VP(vp)) { 410 MARK_INT_FAIL(CODA_RDWR_STATS); 411 return(EINVAL); 412 } 413 414 /* Redirect the request to UFS. 
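     * That is, perform the I/O directly on the container file via
     * VOP_READ/VOP_WRITE below.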
*/ 415 416 /* 417 * If file is not already open this must be a page 418 * {read,write} request. Iget the cache file's inode 419 * pointer if we still have its <device, inode> pair. 420 * Otherwise, we must do an internal open to derive the 421 * pair. 422 * XXX Integrate this into a coherent strategy for container 423 * file acquisition. 424 */ 425 if (cfvp == NULL) { 426 /* 427 * If we're dumping core, do the internal open. Otherwise 428 * venus won't have the correct size of the core when 429 * it's completely written. 430 */ 431 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) { 432 #ifdef CODA_VERBOSE 433 printf("%s: grabbing container vnode, losing reference\n", 434 __func__); 435 #endif 436 /* Get locked and refed vnode. */ 437 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp); 438 if (error) { 439 MARK_INT_FAIL(CODA_RDWR_STATS); 440 return(error); 441 } 442 /* 443 * Drop lock. 444 * XXX Where is reference released. 445 */ 446 VOP_UNLOCK(cfvp); 447 } 448 else { 449 #ifdef CODA_VERBOSE 450 printf("%s: internal VOP_OPEN\n", __func__); 451 #endif 452 opened_internally = 1; 453 MARK_INT_GEN(CODA_OPEN_STATS); 454 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred); 455 #ifdef CODA_VERBOSE 456 printf("%s: Internally Opening %p\n", __func__, vp); 457 #endif 458 if (error) { 459 MARK_INT_FAIL(CODA_RDWR_STATS); 460 return(error); 461 } 462 cfvp = cp->c_ovp; 463 } 464 } 465 466 /* Have UFS handle the call. */ 467 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__, 468 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); ) 469 470 if (rw == UIO_READ) { 471 error = VOP_READ(cfvp, uiop, ioflag, cred); 472 } else { 473 error = VOP_WRITE(cfvp, uiop, ioflag, cred); 474 } 475 476 if (error) 477 MARK_INT_FAIL(CODA_RDWR_STATS); 478 else 479 MARK_INT_SAT(CODA_RDWR_STATS); 480 481 /* Do an internal close if necessary. */ 482 if (opened_internally) { 483 MARK_INT_GEN(CODA_CLOSE_STATS); 484 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred); 485 } 486 487 /* Invalidate cached attributes if writing. */ 488 if (rw == UIO_WRITE) 489 cp->c_flags &= ~C_VATTR; 490 return(error); 491 } 492 493 int 494 coda_ioctl(void *v) 495 { 496 /* true args */ 497 struct vop_ioctl_args *ap = v; 498 vnode_t *vp = ap->a_vp; 499 int com = ap->a_command; 500 void *data = ap->a_data; 501 int flag = ap->a_fflag; 502 kauth_cred_t cred = ap->a_cred; 503 /* locals */ 504 int error; 505 vnode_t *tvp; 506 struct PioctlData *iap = (struct PioctlData *)data; 507 namei_simple_flags_t sflags; 508 509 MARK_ENTRY(CODA_IOCTL_STATS); 510 511 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));) 512 513 /* Don't check for operation on a dying object, for ctlvp it 514 shouldn't matter */ 515 516 /* Must be control object to succeed. */ 517 if (!IS_CTL_VP(vp)) { 518 MARK_INT_FAIL(CODA_IOCTL_STATS); 519 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));) 520 return (EOPNOTSUPP); 521 } 522 /* Look up the pathname. */ 523 524 /* Should we use the name cache here? It would get it from 525 lookupname sooner or later anyway, right? */ 526 527 sflags = iap->follow ? 
NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT; 528 error = namei_simple_user(iap->path, sflags, &tvp); 529 530 if (error) { 531 MARK_INT_FAIL(CODA_IOCTL_STATS); 532 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n", 533 __func__, error));) 534 return(error); 535 } 536 537 /* 538 * Make sure this is a coda style cnode, but it may be a 539 * different vfsp 540 */ 541 /* XXX: this totally violates the comment about vtagtype in vnode.h */ 542 if (tvp->v_tag != VT_CODA) { 543 vrele(tvp); 544 MARK_INT_FAIL(CODA_IOCTL_STATS); 545 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n", 546 __func__, iap->path));) 547 return(EINVAL); 548 } 549 550 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) { 551 vrele(tvp); 552 return(EINVAL); 553 } 554 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data, 555 cred, curlwp); 556 557 if (error) 558 MARK_INT_FAIL(CODA_IOCTL_STATS); 559 else 560 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); ) 561 562 vrele(tvp); 563 return(error); 564 } 565 566 /* 567 * To reduce the cost of a user-level venus;we cache attributes in 568 * the kernel. Each cnode has storage allocated for an attribute. If 569 * c_vattr is valid, return a reference to it. Otherwise, get the 570 * attributes from venus and store them in the cnode. There is some 571 * question if this method is a security leak. But I think that in 572 * order to make this call, the user must have done a lookup and 573 * opened the file, and therefore should already have access. 574 */ 575 int 576 coda_getattr(void *v) 577 { 578 /* true args */ 579 struct vop_getattr_args *ap = v; 580 vnode_t *vp = ap->a_vp; 581 struct cnode *cp = VTOC(vp); 582 struct vattr *vap = ap->a_vap; 583 kauth_cred_t cred = ap->a_cred; 584 /* locals */ 585 int error; 586 587 MARK_ENTRY(CODA_GETATTR_STATS); 588 589 /* Check for getattr of control object. */ 590 if (IS_CTL_VP(vp)) { 591 MARK_INT_FAIL(CODA_GETATTR_STATS); 592 return(ENOENT); 593 } 594 595 /* Check to see if the attributes have already been cached */ 596 if (VALID_VATTR(cp)) { 597 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n", 598 __func__, coda_f2s(&cp->c_fid)));}) 599 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) 600 coda_print_vattr(&cp->c_vattr); ) 601 602 *vap = cp->c_vattr; 603 MARK_INT_SAT(CODA_GETATTR_STATS); 604 return(0); 605 } 606 607 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap); 608 609 if (!error) { 610 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n", 611 __func__, coda_f2s(&cp->c_fid), error)); ) 612 613 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) 614 coda_print_vattr(vap); ) 615 616 /* If not open for write, store attributes in cnode */ 617 if ((cp->c_owrite == 0) && (coda_attr_cache)) { 618 cp->c_vattr = *vap; 619 cp->c_flags |= C_VATTR; 620 } 621 622 } 623 return(error); 624 } 625 626 int 627 coda_setattr(void *v) 628 { 629 /* true args */ 630 struct vop_setattr_args *ap = v; 631 vnode_t *vp = ap->a_vp; 632 struct cnode *cp = VTOC(vp); 633 struct vattr *vap = ap->a_vap; 634 kauth_cred_t cred = ap->a_cred; 635 /* locals */ 636 int error; 637 638 MARK_ENTRY(CODA_SETATTR_STATS); 639 640 /* Check for setattr of control object. 
*/ 641 if (IS_CTL_VP(vp)) { 642 MARK_INT_FAIL(CODA_SETATTR_STATS); 643 return(ENOENT); 644 } 645 646 if (codadebug & CODADBGMSK(CODA_SETATTR)) { 647 coda_print_vattr(vap); 648 } 649 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp); 650 651 if (!error) 652 cp->c_flags &= ~C_VATTR; 653 654 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); ) 655 return(error); 656 } 657 658 int 659 coda_access(void *v) 660 { 661 /* true args */ 662 struct vop_access_args *ap = v; 663 vnode_t *vp = ap->a_vp; 664 struct cnode *cp = VTOC(vp); 665 int mode = ap->a_mode; 666 kauth_cred_t cred = ap->a_cred; 667 /* locals */ 668 int error; 669 670 MARK_ENTRY(CODA_ACCESS_STATS); 671 672 /* Check for access of control object. Only read access is 673 allowed on it. */ 674 if (IS_CTL_VP(vp)) { 675 /* bogus hack - all will be marked as successes */ 676 MARK_INT_SAT(CODA_ACCESS_STATS); 677 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC))) 678 ? 0 : EACCES); 679 } 680 681 /* 682 * if the file is a directory, and we are checking exec (eg lookup) 683 * access, and the file is in the namecache, then the user must have 684 * lookup access to it. 685 */ 686 if (coda_access_cache) { 687 if ((vp->v_type == VDIR) && (mode & VEXEC)) { 688 if (coda_nc_lookup(cp, ".", 1, cred)) { 689 MARK_INT_SAT(CODA_ACCESS_STATS); 690 return(0); /* it was in the cache */ 691 } 692 } 693 } 694 695 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp); 696 697 return(error); 698 } 699 700 /* 701 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually 702 * done. If a buffer has been saved in anticipation of a coda_create or 703 * a coda_remove, delete it. 704 */ 705 /* ARGSUSED */ 706 int 707 coda_abortop(void *v) 708 { 709 /* true args */ 710 struct vop_abortop_args /* { 711 vnode_t *a_dvp; 712 struct componentname *a_cnp; 713 } */ *ap = v; 714 715 (void)ap; 716 /* upcall decl */ 717 /* locals */ 718 719 return (0); 720 } 721 722 int 723 coda_readlink(void *v) 724 { 725 /* true args */ 726 struct vop_readlink_args *ap = v; 727 vnode_t *vp = ap->a_vp; 728 struct cnode *cp = VTOC(vp); 729 struct uio *uiop = ap->a_uio; 730 kauth_cred_t cred = ap->a_cred; 731 /* locals */ 732 struct lwp *l = curlwp; 733 int error; 734 char *str; 735 int len; 736 737 MARK_ENTRY(CODA_READLINK_STATS); 738 739 /* Check for readlink of control object. 
 */
    if (IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_READLINK_STATS);
        return(ENOENT);
    }

    if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {  /* symlink was cached */
        uiop->uio_rw = UIO_READ;
        error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
        if (error)
            MARK_INT_FAIL(CODA_READLINK_STATS);
        else
            MARK_INT_SAT(CODA_READLINK_STATS);
        return(error);
    }

    error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);

    if (!error) {
        uiop->uio_rw = UIO_READ;
        error = uiomove(str, len, uiop);

        if (coda_symlink_cache) {
            cp->c_symlink = str;
            cp->c_symlen = len;
            cp->c_flags |= C_SYMLINK;
        } else
            CODA_FREE(str, len);
    }

    CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n", error));)
    return(error);
}

int
coda_fsync(void *v)
{
/* true args */
    struct vop_fsync_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred = ap->a_cred;
/* locals */
    vnode_t *convp = cp->c_ovp;
    int error;

    MARK_ENTRY(CODA_FSYNC_STATS);

    /* Check for fsync on an unmounting object */
    /* The NetBSD kernel, in its infinite wisdom, can try to fsync
     * after an unmount has been initiated.  This is a Bad Thing,
     * which we have to avoid.  Not a legitimate failure for stats.
     */
    if (IS_UNMOUNTING(cp)) {
        return(ENODEV);
    }

    /* Check for fsync of control object or uninitialized cnode. */
    if (IS_CTL_VP(vp) || vp->v_type == VNON) {
        MARK_INT_SAT(CODA_FSYNC_STATS);
        return(0);
    }

    if (convp)
        VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);

    /*
     * We can expect fsync on any vnode at all if venus is purging it.
     * Venus can't very well answer the fsync request, now can it?
     * Hopefully, it won't have to, because hopefully, venus preserves
     * the (possibly untrue) invariant that it never purges an open
     * vnode.  Hopefully.
     */
    if (cp->c_flags & C_PURGING) {
        return(0);
    }

    error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);

    CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n", error)); )
    return(error);
}

/*
 * vp is locked on entry, and we must unlock it.
 * XXX This routine is suspect and probably needs rewriting.
 */
int
coda_inactive(void *v)
{
/* true args */
    struct vop_inactive_v2_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred __unused = NULL;

    /* We don't need to send inactive to venus - DCS */
    MARK_ENTRY(CODA_INACTIVE_STATS);

    if (IS_CTL_VP(vp)) {
        MARK_INT_SAT(CODA_INACTIVE_STATS);
        return 0;
    }

    CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
        coda_f2s(&cp->c_fid), vp->v_mount));)

    if (vp->v_mount->mnt_data == NULL) {
        myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
        panic("badness in coda_inactive");
    }

#ifdef CODA_VERBOSE
    /* Sanity checks that perhaps should be panic. */
    if (vp->v_usecount > 1)
        printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
    if (cp->c_ovp != NULL)
        printf("%s: %p ovp != NULL\n", __func__, vp);
#endif
    /* XXX Do we need to VOP_CLOSE container vnodes? */
    if (!IS_UNMOUNTING(cp))
        *ap->a_recycle = true;

    MARK_INT_SAT(CODA_INACTIVE_STATS);
    return(0);
}

/*
 * Coda does not use the normal namecache, but a private version.
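 * The private cache is declared in coda_namecache.h; see the coda_nc_*
 * calls below.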
868 * Consider how to use the standard facility instead. 869 */ 870 int 871 coda_lookup(void *v) 872 { 873 /* true args */ 874 struct vop_lookup_v2_args *ap = v; 875 /* (locked) vnode of dir in which to do lookup */ 876 vnode_t *dvp = ap->a_dvp; 877 struct cnode *dcp = VTOC(dvp); 878 /* output variable for result */ 879 vnode_t **vpp = ap->a_vpp; 880 /* name to lookup */ 881 struct componentname *cnp = ap->a_cnp; 882 kauth_cred_t cred = cnp->cn_cred; 883 struct lwp *l = curlwp; 884 /* locals */ 885 struct cnode *cp; 886 const char *nm = cnp->cn_nameptr; 887 int len = cnp->cn_namelen; 888 CodaFid VFid; 889 int vtype; 890 int error = 0; 891 892 MARK_ENTRY(CODA_LOOKUP_STATS); 893 894 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__, 895 nm, coda_f2s(&dcp->c_fid)));) 896 897 /* 898 * XXX componentname flags in MODMASK are not handled at all 899 */ 900 901 /* 902 * The overall strategy is to switch on the lookup type and get a 903 * result vnode that is vref'd but not locked. 904 */ 905 906 /* Check for lookup of control object. */ 907 if (IS_CTL_NAME(dvp, nm, len)) { 908 *vpp = coda_ctlvp; 909 vref(*vpp); 910 MARK_INT_SAT(CODA_LOOKUP_STATS); 911 goto exit; 912 } 913 914 /* Avoid trying to hand venus an unreasonably long name. */ 915 if (len+1 > CODA_MAXNAMLEN) { 916 MARK_INT_FAIL(CODA_LOOKUP_STATS); 917 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long:, %s (%s)\n", 918 __func__, coda_f2s(&dcp->c_fid), nm));) 919 *vpp = (vnode_t *)0; 920 error = EINVAL; 921 goto exit; 922 } 923 924 /* 925 * Try to resolve the lookup in the minicache. If that fails, ask 926 * venus to do the lookup. XXX The interaction between vnode 927 * locking and any locking that coda does is not clear. 928 */ 929 cp = coda_nc_lookup(dcp, nm, len, cred); 930 if (cp) { 931 *vpp = CTOV(cp); 932 vref(*vpp); 933 CODADEBUG(CODA_LOOKUP, 934 myprintf(("lookup result %d vpp %p\n",error,*vpp));) 935 } else { 936 /* The name wasn't cached, so ask Venus. */ 937 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, 938 &vtype); 939 940 if (error) { 941 MARK_INT_FAIL(CODA_LOOKUP_STATS); 942 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n", 943 __func__, coda_f2s(&dcp->c_fid), nm, error));) 944 *vpp = (vnode_t *)0; 945 } else { 946 MARK_INT_SAT(CODA_LOOKUP_STATS); 947 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n", 948 __func__, coda_f2s(&VFid), vtype, error)); ) 949 950 cp = make_coda_node(&VFid, dvp->v_mount, vtype); 951 *vpp = CTOV(cp); 952 /* vpp is now vrefed. */ 953 954 /* 955 * Unless this vnode is marked CODA_NOCACHE, enter it into 956 * the coda name cache to avoid a future venus round-trip. 957 * XXX Interaction with componentname NOCACHE is unclear. 958 */ 959 if (!(vtype & CODA_NOCACHE)) 960 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 961 } 962 } 963 964 exit: 965 /* 966 * If we are creating, and this was the last name to be looked up, 967 * and the error was ENOENT, then make the leaf NULL and return 968 * success. 969 * XXX Check against new lookup rules. 
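     * (EJUSTRETURN tells namei that the missing last component is expected,
     * so the CREATE or RENAME can proceed.)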
970 */ 971 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) 972 && (cnp->cn_flags & ISLASTCN) 973 && (error == ENOENT)) 974 { 975 error = EJUSTRETURN; 976 *ap->a_vpp = NULL; 977 } 978 979 return(error); 980 } 981 982 /*ARGSUSED*/ 983 int 984 coda_create(void *v) 985 { 986 /* true args */ 987 struct vop_create_v3_args *ap = v; 988 vnode_t *dvp = ap->a_dvp; 989 struct cnode *dcp = VTOC(dvp); 990 struct vattr *va = ap->a_vap; 991 int exclusive = 1; 992 int mode = ap->a_vap->va_mode; 993 vnode_t **vpp = ap->a_vpp; 994 struct componentname *cnp = ap->a_cnp; 995 kauth_cred_t cred = cnp->cn_cred; 996 struct lwp *l = curlwp; 997 /* locals */ 998 int error; 999 struct cnode *cp; 1000 const char *nm = cnp->cn_nameptr; 1001 int len = cnp->cn_namelen; 1002 CodaFid VFid; 1003 struct vattr attr; 1004 1005 MARK_ENTRY(CODA_CREATE_STATS); 1006 1007 /* All creates are exclusive XXX */ 1008 /* I'm assuming the 'mode' argument is the file mode bits XXX */ 1009 1010 /* Check for create of control object. */ 1011 if (IS_CTL_NAME(dvp, nm, len)) { 1012 *vpp = (vnode_t *)0; 1013 MARK_INT_FAIL(CODA_CREATE_STATS); 1014 return(EACCES); 1015 } 1016 1017 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr); 1018 1019 if (!error) { 1020 1021 /* 1022 * XXX Violation of venus/kernel invariants is a difficult case, 1023 * but venus should not be able to cause a panic. 1024 */ 1025 /* If this is an exclusive create, panic if the file already exists. */ 1026 /* Venus should have detected the file and reported EEXIST. */ 1027 1028 if ((exclusive == 1) && 1029 (coda_find(&VFid) != NULL)) 1030 panic("cnode existed for newly created file!"); 1031 1032 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type); 1033 *vpp = CTOV(cp); 1034 1035 /* XXX vnodeops doesn't say this argument can be changed. */ 1036 /* Update va to reflect the new attributes. */ 1037 (*va) = attr; 1038 1039 /* Update the attribute cache and mark it as valid */ 1040 if (coda_attr_cache) { 1041 VTOC(*vpp)->c_vattr = attr; 1042 VTOC(*vpp)->c_flags |= C_VATTR; 1043 } 1044 1045 /* Invalidate parent's attr cache (modification time has changed). */ 1046 VTOC(dvp)->c_flags &= ~C_VATTR; 1047 1048 /* enter the new vnode in the Name Cache */ 1049 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 1050 1051 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__, 1052 coda_f2s(&VFid), error)); ) 1053 } else { 1054 *vpp = (vnode_t *)0; 1055 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__, 1056 error));) 1057 } 1058 1059 if (!error) { 1060 #ifdef CODA_VERBOSE 1061 if ((cnp->cn_flags & LOCKLEAF) == 0) 1062 /* This should not happen; flags are for lookup only. 
 */
            printf("%s: LOCKLEAF not set!\n", __func__);
#endif
    }

    return(error);
}

int
coda_remove(void *v)
{
/* true args */
    struct vop_remove_v2_args *ap = v;
    vnode_t *dvp = ap->a_dvp;
    struct cnode *cp = VTOC(dvp);
    vnode_t *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    struct cnode *tp;

    MARK_ENTRY(CODA_REMOVE_STATS);

    CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
        nm, coda_f2s(&cp->c_fid)));)

    /* Remove the file's entry from the CODA Name Cache */
    /* We're being conservative here, it might be that this person
     * doesn't really have sufficient access to delete the file
     * but we feel zapping the entry won't really hurt anyone -- dcs
     */
    /* I'm gonna go out on a limb here. If a file and a hardlink to it
     * exist, and one is removed, the link count on the other will be
     * off by 1. We could either invalidate the attrs if cached, or
     * fix them. I'll try to fix them. DCS 11/8/94
     */
    tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
    if (tp) {
        if (VALID_VATTR(tp)) {          /* If attrs are cached */
            if (tp->c_vattr.va_nlink > 1) {     /* If it's a hard link */
                tp->c_vattr.va_nlink--;
            }
        }

        coda_nc_zapfile(VTOC(dvp), nm, len);
        /* No need to flush it if it doesn't exist! */
    }
    /* Invalidate the parent's attr cache, the modification time has changed */
    VTOC(dvp)->c_flags &= ~C_VATTR;

    /* Check for remove of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
        MARK_INT_FAIL(CODA_REMOVE_STATS);
        return(ENOENT);
    }

    error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);

    CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n", error)); )

    /*
     * Unlock and release child (avoiding double if ".").
     */
    if (dvp == vp) {
        vrele(vp);
    } else {
        vput(vp);
    }

    return(error);
}

/*
 * dvp is the directory where the link is to go, and is locked.
 * vp is the object to be linked to, and is unlocked.
 * At exit, we must unlock dvp, and vput dvp.
 */
int
coda_link(void *v)
{
/* true args */
    struct vop_link_v2_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    vnode_t *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;

    MARK_ENTRY(CODA_LINK_STATS);

    if (codadebug & CODADBGMSK(CODA_LINK)) {
        myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
        myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
    }

    /* Check for link to/from control object. */
    if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_LINK_STATS);
        return(EACCES);
    }

    /* If linking . to a name, error out earlier.
*/ 1181 if (vp == dvp) { 1182 #ifdef CODA_VERBOSE 1183 printf("%s coda_link vp==dvp\n", __func__); 1184 #endif 1185 error = EISDIR; 1186 goto exit; 1187 } 1188 1189 /* XXX Why does venus_link need the vnode to be locked?*/ 1190 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) { 1191 #ifdef CODA_VERBOSE 1192 printf("%s: couldn't lock vnode %p\n", __func__, vp); 1193 #endif 1194 error = EFAULT; /* XXX better value */ 1195 goto exit; 1196 } 1197 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l); 1198 VOP_UNLOCK(vp); 1199 1200 /* Invalidate parent's attr cache (the modification time has changed). */ 1201 VTOC(dvp)->c_flags &= ~C_VATTR; 1202 /* Invalidate child's attr cache (XXX why). */ 1203 VTOC(vp)->c_flags &= ~C_VATTR; 1204 1205 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); ) 1206 1207 exit: 1208 return(error); 1209 } 1210 1211 int 1212 coda_rename(void *v) 1213 { 1214 /* true args */ 1215 struct vop_rename_args *ap = v; 1216 vnode_t *odvp = ap->a_fdvp; 1217 struct cnode *odcp = VTOC(odvp); 1218 struct componentname *fcnp = ap->a_fcnp; 1219 vnode_t *ndvp = ap->a_tdvp; 1220 struct cnode *ndcp = VTOC(ndvp); 1221 struct componentname *tcnp = ap->a_tcnp; 1222 kauth_cred_t cred = fcnp->cn_cred; 1223 struct lwp *l = curlwp; 1224 /* true args */ 1225 int error; 1226 const char *fnm = fcnp->cn_nameptr; 1227 int flen = fcnp->cn_namelen; 1228 const char *tnm = tcnp->cn_nameptr; 1229 int tlen = tcnp->cn_namelen; 1230 1231 MARK_ENTRY(CODA_RENAME_STATS); 1232 1233 /* Hmmm. The vnodes are already looked up. Perhaps they are locked? 1234 This could be Bad. XXX */ 1235 #ifdef OLD_DIAGNOSTIC 1236 if ((fcnp->cn_cred != tcnp->cn_cred) 1237 || (fcnp->cn_lwp != tcnp->cn_lwp)) 1238 { 1239 panic("%s: component names don't agree", __func__); 1240 } 1241 #endif 1242 1243 /* Check for rename involving control object. */ 1244 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) { 1245 MARK_INT_FAIL(CODA_RENAME_STATS); 1246 return(EACCES); 1247 } 1248 1249 /* Problem with moving directories -- need to flush entry for .. */ 1250 if (odvp != ndvp) { 1251 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred); 1252 if (ovcp) { 1253 vnode_t *ovp = CTOV(ovcp); 1254 if ((ovp) && 1255 (ovp->v_type == VDIR)) /* If it's a directory */ 1256 coda_nc_zapfile(VTOC(ovp),"..", 2); 1257 } 1258 } 1259 1260 /* Remove the entries for both source and target files */ 1261 coda_nc_zapfile(VTOC(odvp), fnm, flen); 1262 coda_nc_zapfile(VTOC(ndvp), tnm, tlen); 1263 1264 /* Invalidate the parent's attr cache, the modification time has changed */ 1265 VTOC(odvp)->c_flags &= ~C_VATTR; 1266 VTOC(ndvp)->c_flags &= ~C_VATTR; 1267 1268 if (flen+1 > CODA_MAXNAMLEN) { 1269 MARK_INT_FAIL(CODA_RENAME_STATS); 1270 error = EINVAL; 1271 goto exit; 1272 } 1273 1274 if (tlen+1 > CODA_MAXNAMLEN) { 1275 MARK_INT_FAIL(CODA_RENAME_STATS); 1276 error = EINVAL; 1277 goto exit; 1278 } 1279 1280 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l); 1281 1282 exit: 1283 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));) 1284 /* XXX - do we need to call cache pureg on the moved vnode? */ 1285 cache_purge(ap->a_fvp); 1286 1287 /* It seems to be incumbent on us to drop locks on all four vnodes */ 1288 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. 
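     * Hence the vrele() calls for the from side and vput() for the to side;
     * tvp is only vrele()d when it is the same vnode as tdvp.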
*/ 1289 1290 vrele(ap->a_fvp); 1291 vrele(odvp); 1292 1293 if (ap->a_tvp) { 1294 if (ap->a_tvp == ndvp) { 1295 vrele(ap->a_tvp); 1296 } else { 1297 vput(ap->a_tvp); 1298 } 1299 } 1300 1301 vput(ndvp); 1302 return(error); 1303 } 1304 1305 int 1306 coda_mkdir(void *v) 1307 { 1308 /* true args */ 1309 struct vop_mkdir_v3_args *ap = v; 1310 vnode_t *dvp = ap->a_dvp; 1311 struct cnode *dcp = VTOC(dvp); 1312 struct componentname *cnp = ap->a_cnp; 1313 struct vattr *va = ap->a_vap; 1314 vnode_t **vpp = ap->a_vpp; 1315 kauth_cred_t cred = cnp->cn_cred; 1316 struct lwp *l = curlwp; 1317 /* locals */ 1318 int error; 1319 const char *nm = cnp->cn_nameptr; 1320 int len = cnp->cn_namelen; 1321 struct cnode *cp; 1322 CodaFid VFid; 1323 struct vattr ova; 1324 1325 MARK_ENTRY(CODA_MKDIR_STATS); 1326 1327 /* Check for mkdir of target object. */ 1328 if (IS_CTL_NAME(dvp, nm, len)) { 1329 *vpp = (vnode_t *)0; 1330 MARK_INT_FAIL(CODA_MKDIR_STATS); 1331 return(EACCES); 1332 } 1333 1334 if (len+1 > CODA_MAXNAMLEN) { 1335 *vpp = (vnode_t *)0; 1336 MARK_INT_FAIL(CODA_MKDIR_STATS); 1337 return(EACCES); 1338 } 1339 1340 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova); 1341 1342 if (!error) { 1343 if (coda_find(&VFid) != NULL) 1344 panic("cnode existed for newly created directory!"); 1345 1346 1347 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type); 1348 *vpp = CTOV(cp); 1349 1350 /* enter the new vnode in the Name Cache */ 1351 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 1352 1353 /* as a side effect, enter "." and ".." for the directory */ 1354 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp)); 1355 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp)); 1356 1357 if (coda_attr_cache) { 1358 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */ 1359 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */ 1360 } 1361 1362 /* Invalidate the parent's attr cache, the modification time has changed */ 1363 VTOC(dvp)->c_flags &= ~C_VATTR; 1364 1365 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__, 1366 coda_f2s(&VFid), error)); ) 1367 } else { 1368 *vpp = (vnode_t *)0; 1369 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));) 1370 } 1371 1372 return(error); 1373 } 1374 1375 int 1376 coda_rmdir(void *v) 1377 { 1378 /* true args */ 1379 struct vop_rmdir_v2_args *ap = v; 1380 vnode_t *dvp = ap->a_dvp; 1381 struct cnode *dcp = VTOC(dvp); 1382 vnode_t *vp = ap->a_vp; 1383 struct componentname *cnp = ap->a_cnp; 1384 kauth_cred_t cred = cnp->cn_cred; 1385 struct lwp *l = curlwp; 1386 /* true args */ 1387 int error; 1388 const char *nm = cnp->cn_nameptr; 1389 int len = cnp->cn_namelen; 1390 struct cnode *cp; 1391 1392 MARK_ENTRY(CODA_RMDIR_STATS); 1393 1394 /* Check for rmdir of control object. */ 1395 if (IS_CTL_NAME(dvp, nm, len)) { 1396 MARK_INT_FAIL(CODA_RMDIR_STATS); 1397 return(ENOENT); 1398 } 1399 1400 /* Can't remove . in self. */ 1401 if (dvp == vp) { 1402 #ifdef CODA_VERBOSE 1403 printf("%s: dvp == vp\n", __func__); 1404 #endif 1405 error = EINVAL; 1406 goto exit; 1407 } 1408 1409 /* 1410 * The caller may not have adequate permissions, and the venus 1411 * operation may fail, but it doesn't hurt from a correctness 1412 * viewpoint to invalidate cache entries. 1413 * XXX Why isn't this done after the venus_rmdir call? 1414 */ 1415 /* Look up child in name cache (by name, from parent). */ 1416 cp = coda_nc_lookup(dcp, nm, len, cred); 1417 /* If found, remove all children of the child (., ..). 
*/ 1418 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL); 1419 1420 /* Remove child's own entry. */ 1421 coda_nc_zapfile(dcp, nm, len); 1422 1423 /* Invalidate parent's attr cache (the modification time has changed). */ 1424 dcp->c_flags &= ~C_VATTR; 1425 1426 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l); 1427 1428 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); ) 1429 1430 exit: 1431 /* unlock and release child */ 1432 if (dvp == vp) { 1433 vrele(vp); 1434 } else { 1435 vput(vp); 1436 } 1437 1438 return(error); 1439 } 1440 1441 int 1442 coda_symlink(void *v) 1443 { 1444 /* true args */ 1445 struct vop_symlink_v3_args *ap = v; 1446 vnode_t *dvp = ap->a_dvp; 1447 struct cnode *dcp = VTOC(dvp); 1448 /* a_vpp is used in place below */ 1449 struct componentname *cnp = ap->a_cnp; 1450 struct vattr *tva = ap->a_vap; 1451 char *path = ap->a_target; 1452 kauth_cred_t cred = cnp->cn_cred; 1453 struct lwp *l = curlwp; 1454 /* locals */ 1455 int error; 1456 u_long saved_cn_flags; 1457 const char *nm = cnp->cn_nameptr; 1458 int len = cnp->cn_namelen; 1459 int plen = strlen(path); 1460 1461 /* 1462 * Here's the strategy for the moment: perform the symlink, then 1463 * do a lookup to grab the resulting vnode. I know this requires 1464 * two communications with Venus for a new sybolic link, but 1465 * that's the way the ball bounces. I don't yet want to change 1466 * the way the Mach symlink works. When Mach support is 1467 * deprecated, we should change symlink so that the common case 1468 * returns the resultant vnode in a vpp argument. 1469 */ 1470 1471 MARK_ENTRY(CODA_SYMLINK_STATS); 1472 1473 /* Check for symlink of control object. */ 1474 if (IS_CTL_NAME(dvp, nm, len)) { 1475 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1476 error = EACCES; 1477 goto exit; 1478 } 1479 1480 if (plen+1 > CODA_MAXPATHLEN) { 1481 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1482 error = EINVAL; 1483 goto exit; 1484 } 1485 1486 if (len+1 > CODA_MAXNAMLEN) { 1487 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1488 error = EINVAL; 1489 goto exit; 1490 } 1491 1492 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l); 1493 1494 /* Invalidate the parent's attr cache (modification time has changed). */ 1495 dcp->c_flags &= ~C_VATTR; 1496 1497 if (!error) { 1498 /* 1499 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags; 1500 * these are defined only for VOP_LOOKUP. We desire to reuse 1501 * cnp for a VOP_LOOKUP operation, and must be sure to not pass 1502 * stray flags passed to us. Such stray flags can occur because 1503 * sys_symlink makes a namei call and then reuses the 1504 * componentname structure. 1505 */ 1506 /* 1507 * XXX Arguably we should create our own componentname structure 1508 * and not reuse the one that was passed in. 1509 */ 1510 saved_cn_flags = cnp->cn_flags; 1511 cnp->cn_flags &= ~(MODMASK | OPMASK); 1512 cnp->cn_flags |= LOOKUP; 1513 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp); 1514 cnp->cn_flags = saved_cn_flags; 1515 } 1516 1517 exit: 1518 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); ) 1519 return(error); 1520 } 1521 1522 /* 1523 * Read directory entries. 
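 * The request is passed through to the container file's VOP_READDIR,
 * opening the container internally if it is not already open.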
1524 */ 1525 int 1526 coda_readdir(void *v) 1527 { 1528 /* true args */ 1529 struct vop_readdir_args *ap = v; 1530 vnode_t *vp = ap->a_vp; 1531 struct cnode *cp = VTOC(vp); 1532 struct uio *uiop = ap->a_uio; 1533 kauth_cred_t cred = ap->a_cred; 1534 int *eofflag = ap->a_eofflag; 1535 off_t **cookies = ap->a_cookies; 1536 int *ncookies = ap->a_ncookies; 1537 /* upcall decl */ 1538 /* locals */ 1539 int error = 0; 1540 1541 MARK_ENTRY(CODA_READDIR_STATS); 1542 1543 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__, 1544 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 1545 (long long) uiop->uio_offset)); ) 1546 1547 /* Check for readdir of control object. */ 1548 if (IS_CTL_VP(vp)) { 1549 MARK_INT_FAIL(CODA_READDIR_STATS); 1550 return(ENOENT); 1551 } 1552 1553 { 1554 /* Redirect the request to UFS. */ 1555 1556 /* If directory is not already open do an "internal open" on it. */ 1557 int opened_internally = 0; 1558 if (cp->c_ovp == NULL) { 1559 opened_internally = 1; 1560 MARK_INT_GEN(CODA_OPEN_STATS); 1561 error = VOP_OPEN(vp, FREAD, cred); 1562 #ifdef CODA_VERBOSE 1563 printf("%s: Internally Opening %p\n", __func__, vp); 1564 #endif 1565 if (error) return(error); 1566 } else 1567 vp = cp->c_ovp; 1568 1569 /* Have UFS handle the call. */ 1570 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n", 1571 __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); ) 1572 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies); 1573 if (error) 1574 MARK_INT_FAIL(CODA_READDIR_STATS); 1575 else 1576 MARK_INT_SAT(CODA_READDIR_STATS); 1577 1578 /* Do an "internal close" if necessary. */ 1579 if (opened_internally) { 1580 MARK_INT_GEN(CODA_CLOSE_STATS); 1581 (void)VOP_CLOSE(vp, FREAD, cred); 1582 } 1583 } 1584 1585 return(error); 1586 } 1587 1588 /* 1589 * Convert from file system blocks to device blocks 1590 */ 1591 int 1592 coda_bmap(void *v) 1593 { 1594 /* XXX on the global proc */ 1595 /* true args */ 1596 struct vop_bmap_args *ap = v; 1597 vnode_t *vp __unused = ap->a_vp; /* file's vnode */ 1598 daddr_t bn __unused = ap->a_bn; /* fs block number */ 1599 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */ 1600 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */ 1601 struct lwp *l __unused = curlwp; 1602 /* upcall decl */ 1603 /* locals */ 1604 1605 *vpp = (vnode_t *)0; 1606 myprintf(("coda_bmap called!\n")); 1607 return(EINVAL); 1608 } 1609 1610 /* 1611 * I don't think the following two things are used anywhere, so I've 1612 * commented them out 1613 * 1614 * struct buf *async_bufhead; 1615 * int async_daemon_count; 1616 */ 1617 int 1618 coda_strategy(void *v) 1619 { 1620 /* true args */ 1621 struct vop_strategy_args *ap = v; 1622 struct buf *bp __unused = ap->a_bp; 1623 struct lwp *l __unused = curlwp; 1624 /* upcall decl */ 1625 /* locals */ 1626 1627 myprintf(("coda_strategy called! ")); 1628 return(EINVAL); 1629 } 1630 1631 int 1632 coda_reclaim(void *v) 1633 { 1634 /* true args */ 1635 struct vop_reclaim_v2_args *ap = v; 1636 vnode_t *vp = ap->a_vp; 1637 struct cnode *cp = VTOC(vp); 1638 /* upcall decl */ 1639 /* locals */ 1640 1641 VOP_UNLOCK(vp); 1642 1643 /* 1644 * Forced unmount/flush will let vnodes with non zero use be destroyed! 
 */
    ENTRY;

    if (IS_UNMOUNTING(cp)) {
#ifdef DEBUG
        if (VTOC(vp)->c_ovp) {
            if (IS_UNMOUNTING(cp))
                printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
        }
#endif
    } else {
#ifdef OLD_DIAGNOSTIC
        if (vp->v_usecount != 0)
            printf("%s: pushing active %p\n", __func__, vp);
        if (VTOC(vp)->c_ovp) {
            panic("%s: c_ovp not void", __func__);
        }
#endif
    }
    /* If an array has been allocated to hold the symlink, deallocate it */
    if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
        if (cp->c_symlink == NULL)
            panic("%s: null symlink pointer in cnode", __func__);

        CODA_FREE(cp->c_symlink, cp->c_symlen);
        cp->c_flags &= ~C_SYMLINK;
        cp->c_symlen = 0;
    }

    mutex_enter(vp->v_interlock);
    mutex_enter(&cp->c_lock);
    SET_VTOC(vp) = NULL;
    mutex_exit(&cp->c_lock);
    mutex_exit(vp->v_interlock);
    mutex_destroy(&cp->c_lock);
    kmem_free(cp, sizeof(*cp));

    return (0);
}

int
coda_lock(void *v)
{
/* true args */
    struct vop_lock_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
/* upcall decl */
/* locals */

    ENTRY;

    if (coda_lockdebug) {
        myprintf(("Attempting lock on %s\n",
            coda_f2s(&cp->c_fid)));
    }

    return genfs_lock(v);
}

int
coda_unlock(void *v)
{
/* true args */
    struct vop_unlock_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
/* upcall decl */
/* locals */

    ENTRY;
    if (coda_lockdebug) {
        myprintf(("Attempting unlock on %s\n",
            coda_f2s(&cp->c_fid)));
    }

    return genfs_unlock(v);
}

int
coda_islocked(void *v)
{
/* true args */
    ENTRY;

    return genfs_islocked(v);
}

/*
 * Given a device and inode, obtain a locked vnode.  One reference is
 * obtained and passed back to the caller.
 */
int
coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
{
    int error;
    struct mount *mp;

    /* Obtain mount point structure from device. */
    if (!(mp = devtomp(dev))) {
        myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
            (unsigned long long)dev));
        return(ENXIO);
    }

    /*
     * Obtain vnode from mount point and inode.
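     * VFS_VGET is expected to return it locked and referenced; the
     * KASSERT below checks the lock.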
1752 */ 1753 error = VFS_VGET(mp, ino, vpp); 1754 if (error) { 1755 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__, 1756 (unsigned long long)dev, (unsigned long long)ino, *vpp, error)); 1757 return(ENOENT); 1758 } 1759 /* share the underlying vnode lock with the coda vnode */ 1760 mutex_obj_hold((*vpp)->v_interlock); 1761 uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock); 1762 KASSERT(VOP_ISLOCKED(*vpp)); 1763 return(0); 1764 } 1765 1766 static void 1767 coda_print_vattr(struct vattr *attr) 1768 { 1769 const char *typestr; 1770 1771 switch (attr->va_type) { 1772 case VNON: 1773 typestr = "VNON"; 1774 break; 1775 case VREG: 1776 typestr = "VREG"; 1777 break; 1778 case VDIR: 1779 typestr = "VDIR"; 1780 break; 1781 case VBLK: 1782 typestr = "VBLK"; 1783 break; 1784 case VCHR: 1785 typestr = "VCHR"; 1786 break; 1787 case VLNK: 1788 typestr = "VLNK"; 1789 break; 1790 case VSOCK: 1791 typestr = "VSCK"; 1792 break; 1793 case VFIFO: 1794 typestr = "VFFO"; 1795 break; 1796 case VBAD: 1797 typestr = "VBAD"; 1798 break; 1799 default: 1800 typestr = "????"; 1801 break; 1802 } 1803 1804 1805 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n", 1806 typestr, (int)attr->va_mode, (int)attr->va_uid, 1807 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev)); 1808 1809 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n", 1810 (int)attr->va_fileid, (int)attr->va_nlink, 1811 (int)attr->va_size, 1812 (int)attr->va_blocksize,(int)attr->va_bytes)); 1813 myprintf((" gen %ld flags %ld vaflags %d\n", 1814 attr->va_gen, attr->va_flags, attr->va_vaflags)); 1815 myprintf((" atime sec %d nsec %d\n", 1816 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec)); 1817 myprintf((" mtime sec %d nsec %d\n", 1818 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec)); 1819 myprintf((" ctime sec %d nsec %d\n", 1820 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec)); 1821 } 1822 1823 /* 1824 * Return a vnode for the given fid. 1825 * If no cnode exists for this fid create one and put it 1826 * in a table hashed by coda_f2i(). If the cnode for 1827 * this fid is already in the table return it (ref count is 1828 * incremented by coda_find. The cnode will be flushed from the 1829 * table when coda_inactive calls coda_unsave. 1830 */ 1831 struct cnode * 1832 make_coda_node(CodaFid *fid, struct mount *fvsp, short type) 1833 { 1834 int error __diagused; 1835 struct vnode *vp; 1836 struct cnode *cp; 1837 1838 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp); 1839 KASSERT(error == 0); 1840 1841 mutex_enter(vp->v_interlock); 1842 cp = VTOC(vp); 1843 KASSERT(cp != NULL); 1844 mutex_enter(&cp->c_lock); 1845 mutex_exit(vp->v_interlock); 1846 1847 if (vp->v_type != type) { 1848 if (vp->v_type == VCHR || vp->v_type == VBLK) 1849 spec_node_destroy(vp); 1850 vp->v_type = type; 1851 if (type == VCHR || type == VBLK) 1852 spec_node_init(vp, NODEV); 1853 uvm_vnp_setsize(vp, 0); 1854 } 1855 mutex_exit(&cp->c_lock); 1856 1857 return cp; 1858 } 1859 1860 /* 1861 * coda_getpages may be called on a vnode which has not been opened, 1862 * e.g. to fault in pages to execute a program. In that case, we must 1863 * open the file to get the container. The vnode may or may not be 1864 * locked, and we must leave it in the same state. 
1865 */ 1866 int 1867 coda_getpages(void *v) 1868 { 1869 struct vop_getpages_args /* { 1870 vnode_t *a_vp; 1871 voff_t a_offset; 1872 struct vm_page **a_m; 1873 int *a_count; 1874 int a_centeridx; 1875 vm_prot_t a_access_type; 1876 int a_advice; 1877 int a_flags; 1878 } */ *ap = v; 1879 vnode_t *vp = ap->a_vp, *cvp; 1880 struct cnode *cp = VTOC(vp); 1881 struct lwp *l = curlwp; 1882 kauth_cred_t cred = l->l_cred; 1883 int error, cerror; 1884 int waslocked; /* 1 if vnode lock was held on entry */ 1885 int didopen = 0; /* 1 if we opened container file */ 1886 1887 /* 1888 * Handle a case that uvm_fault doesn't quite use yet. 1889 * See layer_vnops.c. for inspiration. 1890 */ 1891 if (ap->a_flags & PGO_LOCKED) { 1892 return EBUSY; 1893 } 1894 1895 KASSERT(mutex_owned(vp->v_interlock)); 1896 1897 /* Check for control object. */ 1898 if (IS_CTL_VP(vp)) { 1899 #ifdef CODA_VERBOSE 1900 printf("%s: control object %p\n", __func__, vp); 1901 #endif 1902 return(EINVAL); 1903 } 1904 1905 /* 1906 * XXX It's really not ok to be releasing the lock we get, 1907 * because we could be overlapping with another call to 1908 * getpages and drop a lock they are relying on. We need to 1909 * figure out whether getpages ever is called holding the 1910 * lock, and if we should serialize getpages calls by some 1911 * mechanism. 1912 */ 1913 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */ 1914 waslocked = VOP_ISLOCKED(vp); 1915 1916 /* Get container file if not already present. */ 1917 cvp = cp->c_ovp; 1918 if (cvp == NULL) { 1919 /* 1920 * VOP_OPEN requires a locked vnode. We must avoid 1921 * locking the vnode if it is already locked, and 1922 * leave it in the same state on exit. 1923 */ 1924 if (waslocked == 0) { 1925 mutex_exit(vp->v_interlock); 1926 cerror = vn_lock(vp, LK_EXCLUSIVE); 1927 if (cerror) { 1928 #ifdef CODA_VERBOSE 1929 printf("%s: can't lock vnode %p\n", 1930 __func__, vp); 1931 #endif 1932 return cerror; 1933 } 1934 #ifdef CODA_VERBOSE 1935 printf("%s: locked vnode %p\n", __func__, vp); 1936 #endif 1937 } 1938 1939 /* 1940 * Open file (causes upcall to venus). 1941 * XXX Perhaps we should not fully open the file, but 1942 * simply obtain a container file. 1943 */ 1944 /* XXX Is it ok to do this while holding the mutex? */ 1945 cerror = VOP_OPEN(vp, FREAD, cred); 1946 1947 if (cerror) { 1948 #ifdef CODA_VERBOSE 1949 printf("%s: cannot open vnode %p => %d\n", __func__, 1950 vp, cerror); 1951 #endif 1952 if (waslocked == 0) 1953 VOP_UNLOCK(vp); 1954 return cerror; 1955 } 1956 1957 #ifdef CODA_VERBOSE 1958 printf("%s: opened vnode %p\n", __func__, vp); 1959 #endif 1960 cvp = cp->c_ovp; 1961 didopen = 1; 1962 if (waslocked == 0) 1963 mutex_enter(vp->v_interlock); 1964 } 1965 KASSERT(cvp != NULL); 1966 1967 /* Munge the arg structure to refer to the container vnode. */ 1968 KASSERT(cvp->v_interlock == vp->v_interlock); 1969 ap->a_vp = cp->c_ovp; 1970 1971 /* Finally, call getpages on it. */ 1972 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap); 1973 1974 /* If we opened the vnode, we must close it. */ 1975 if (didopen) { 1976 /* 1977 * VOP_CLOSE requires a locked vnode, but we are still 1978 * holding the lock (or riding a caller's lock). 1979 */ 1980 cerror = VOP_CLOSE(vp, FREAD, cred); 1981 #ifdef CODA_VERBOSE 1982 if (cerror != 0) 1983 /* XXX How should we handle this? */ 1984 printf("%s: closed vnode %p -> %d\n", __func__, 1985 vp, cerror); 1986 #endif 1987 1988 /* If we obtained a lock, drop it. 
*/ 1989 if (waslocked == 0) 1990 VOP_UNLOCK(vp); 1991 } 1992 1993 return error; 1994 } 1995 1996 /* 1997 * The protocol requires v_interlock to be held by the caller. 1998 */ 1999 int 2000 coda_putpages(void *v) 2001 { 2002 struct vop_putpages_args /* { 2003 vnode_t *a_vp; 2004 voff_t a_offlo; 2005 voff_t a_offhi; 2006 int a_flags; 2007 } */ *ap = v; 2008 vnode_t *vp = ap->a_vp, *cvp; 2009 struct cnode *cp = VTOC(vp); 2010 int error; 2011 2012 KASSERT(mutex_owned(vp->v_interlock)); 2013 2014 /* Check for control object. */ 2015 if (IS_CTL_VP(vp)) { 2016 mutex_exit(vp->v_interlock); 2017 #ifdef CODA_VERBOSE 2018 printf("%s: control object %p\n", __func__, vp); 2019 #endif 2020 return 0; 2021 } 2022 2023 /* 2024 * If container object is not present, then there are no pages 2025 * to put; just return without error. This happens all the 2026 * time, apparently during discard of a closed vnode (which 2027 * trivially can't have dirty pages). 2028 */ 2029 cvp = cp->c_ovp; 2030 if (cvp == NULL) { 2031 mutex_exit(vp->v_interlock); 2032 return 0; 2033 } 2034 2035 /* Munge the arg structure to refer to the container vnode. */ 2036 KASSERT(cvp->v_interlock == vp->v_interlock); 2037 ap->a_vp = cvp; 2038 2039 /* Finally, call putpages on it. */ 2040 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap); 2041 2042 return error; 2043 } 2044