/*	$NetBSD: coda_vnops.c,v 1.112 2020/05/16 18:31:48 christos Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1990 Carnegie-Mellon University
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.112 2020/05/16 18:31:48 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/kauth.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_vnops.h>
#include <coda/coda_venus.h>
#include <coda/coda_opstats.h>
#include <coda/coda_subr.h>
#include <coda/coda_namecache.h>
#include <coda/coda_pioctl.h>

/*
 * These flags select various performance enhancements.
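 * (Each flag below defaults to on (1); clearing it disables the
 * corresponding cache: attributes, symlink contents, or access checks.)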
80 */ 81 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */ 82 int coda_symlink_cache = 1; /* Set to cache symbolic link information */ 83 int coda_access_cache = 1; /* Set to handle some access checks directly */ 84 85 /* structure to keep track of vfs calls */ 86 87 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE]; 88 89 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++) 90 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++) 91 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++) 92 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++) 93 94 /* What we are delaying for in printf */ 95 static int coda_lockdebug = 0; 96 97 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__)) 98 99 /* Definition of the vnode operation vector */ 100 101 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = { 102 { &vop_default_desc, coda_vop_error }, 103 { &vop_lookup_desc, coda_lookup }, /* lookup */ 104 { &vop_create_desc, coda_create }, /* create */ 105 { &vop_mknod_desc, coda_vop_error }, /* mknod */ 106 { &vop_open_desc, coda_open }, /* open */ 107 { &vop_close_desc, coda_close }, /* close */ 108 { &vop_access_desc, coda_access }, /* access */ 109 { &vop_accessx_desc, genfs_accessx }, /* access */ 110 { &vop_getattr_desc, coda_getattr }, /* getattr */ 111 { &vop_setattr_desc, coda_setattr }, /* setattr */ 112 { &vop_read_desc, coda_read }, /* read */ 113 { &vop_write_desc, coda_write }, /* write */ 114 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */ 115 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */ 116 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 117 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */ 118 { &vop_mmap_desc, genfs_mmap }, /* mmap */ 119 { &vop_fsync_desc, coda_fsync }, /* fsync */ 120 { &vop_remove_desc, coda_remove }, /* remove */ 121 { &vop_link_desc, coda_link }, /* link */ 122 { &vop_rename_desc, coda_rename }, /* rename */ 123 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */ 124 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */ 125 { &vop_symlink_desc, coda_symlink }, /* symlink */ 126 { &vop_readdir_desc, coda_readdir }, /* readdir */ 127 { &vop_readlink_desc, coda_readlink }, /* readlink */ 128 { &vop_abortop_desc, coda_abortop }, /* abortop */ 129 { &vop_inactive_desc, coda_inactive }, /* inactive */ 130 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */ 131 { &vop_lock_desc, coda_lock }, /* lock */ 132 { &vop_unlock_desc, coda_unlock }, /* unlock */ 133 { &vop_bmap_desc, coda_bmap }, /* bmap */ 134 { &vop_strategy_desc, coda_strategy }, /* strategy */ 135 { &vop_print_desc, coda_vop_error }, /* print */ 136 { &vop_islocked_desc, coda_islocked }, /* islocked */ 137 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */ 138 { &vop_advlock_desc, coda_vop_nop }, /* advlock */ 139 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */ 140 { &vop_seek_desc, genfs_seek }, /* seek */ 141 { &vop_poll_desc, genfs_poll }, /* poll */ 142 { &vop_getpages_desc, coda_getpages }, /* getpages */ 143 { &vop_putpages_desc, coda_putpages }, /* putpages */ 144 { NULL, NULL } 145 }; 146 147 static void coda_print_vattr(struct vattr *); 148 149 int (**coda_vnodeop_p)(void *); 150 const struct vnodeopv_desc coda_vnodeop_opv_desc = 151 { &coda_vnodeop_p, coda_vnodeop_entries }; 152 153 /* Definitions of NetBSD vnodeop interfaces */ 154 155 /* 156 * A generic error routine. Return EIO without looking at arguments. 
157 */ 158 int 159 coda_vop_error(void *anon) { 160 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; 161 162 if (codadebug) { 163 myprintf(("%s: Vnode operation %s called (error).\n", 164 __func__, (*desc)->vdesc_name)); 165 } 166 167 return EIO; 168 } 169 170 /* A generic do-nothing. */ 171 int 172 coda_vop_nop(void *anon) { 173 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; 174 175 if (codadebug) { 176 myprintf(("Vnode operation %s called, but unsupported\n", 177 (*desc)->vdesc_name)); 178 } 179 return (0); 180 } 181 182 int 183 coda_vnodeopstats_init(void) 184 { 185 int i; 186 187 for(i=0;i<CODA_VNODEOPS_SIZE;i++) { 188 coda_vnodeopstats[i].opcode = i; 189 coda_vnodeopstats[i].entries = 0; 190 coda_vnodeopstats[i].sat_intrn = 0; 191 coda_vnodeopstats[i].unsat_intrn = 0; 192 coda_vnodeopstats[i].gen_intrn = 0; 193 } 194 195 return 0; 196 } 197 198 /* 199 * XXX The entire relationship between VOP_OPEN and having a container 200 * file (via venus_open) needs to be reexamined. In particular, it's 201 * valid to open/mmap/close and then reference. Instead of doing 202 * VOP_OPEN when getpages needs a container, we should do the 203 * venus_open part, and record that the vnode has opened the container 204 * for getpages, and do the matching logical close on coda_inactive. 205 * Further, coda_rdwr needs a container file, and sometimes needs to 206 * do the equivalent of open (core dumps). 207 */ 208 /* 209 * coda_open calls Venus to return the device and inode of the 210 * container file, and then obtains a vnode for that file. The 211 * container vnode is stored in the coda vnode, and a reference is 212 * added for each open file. 213 */ 214 int 215 coda_open(void *v) 216 { 217 /* 218 * NetBSD can pass the O_EXCL flag in mode, even though the check 219 * has already happened. Venus defensively assumes that if open 220 * is passed the EXCL, it must be a bug. We strip the flag here. 221 */ 222 /* true args */ 223 struct vop_open_args *ap = v; 224 vnode_t *vp = ap->a_vp; 225 struct cnode *cp = VTOC(vp); 226 int flag = ap->a_mode & (~O_EXCL); 227 kauth_cred_t cred = ap->a_cred; 228 /* locals */ 229 int error; 230 dev_t dev; /* container file device, inode, vnode */ 231 ino_t inode; 232 vnode_t *container_vp; 233 234 MARK_ENTRY(CODA_OPEN_STATS); 235 236 KASSERT(VOP_ISLOCKED(vp)); 237 /* Check for open of control file. */ 238 if (IS_CTL_VP(vp)) { 239 /* if (WRITABLE(flag)) */ 240 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) { 241 MARK_INT_FAIL(CODA_OPEN_STATS); 242 return(EACCES); 243 } 244 MARK_INT_SAT(CODA_OPEN_STATS); 245 return(0); 246 } 247 248 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode); 249 if (error) 250 return (error); 251 if (!error) { 252 CODADEBUG(CODA_OPEN, myprintf(( 253 "%s: dev 0x%llx inode %llu result %d\n", __func__, 254 (unsigned long long)dev, (unsigned long long)inode, error));) 255 } 256 257 /* 258 * Obtain locked and referenced container vnode from container 259 * device/inode. 260 */ 261 error = coda_grab_vnode(vp, dev, inode, &container_vp); 262 if (error) 263 return (error); 264 265 /* Save the vnode pointer for the container file. */ 266 if (cp->c_ovp == NULL) { 267 cp->c_ovp = container_vp; 268 } else { 269 if (cp->c_ovp != container_vp) 270 /* 271 * Perhaps venus returned a different container, or 272 * something else went wrong. 273 */ 274 panic("%s: cp->c_ovp != container_vp", __func__); 275 } 276 cp->c_ocount++; 277 278 /* Flush the attribute cache if writing the file. 
*/ 279 if (flag & FWRITE) { 280 cp->c_owrite++; 281 cp->c_flags &= ~C_VATTR; 282 } 283 284 /* 285 * Save the <device, inode> pair for the container file to speed 286 * up subsequent reads while closed (mmap, program execution). 287 * This is perhaps safe because venus will invalidate the node 288 * before changing the container file mapping. 289 */ 290 cp->c_device = dev; 291 cp->c_inode = inode; 292 293 /* Open the container file. */ 294 error = VOP_OPEN(container_vp, flag, cred); 295 /* 296 * Drop the lock on the container, after we have done VOP_OPEN 297 * (which requires a locked vnode). 298 */ 299 VOP_UNLOCK(container_vp); 300 return(error); 301 } 302 303 /* 304 * Close the cache file used for I/O and notify Venus. 305 */ 306 int 307 coda_close(void *v) 308 { 309 /* true args */ 310 struct vop_close_args *ap = v; 311 vnode_t *vp = ap->a_vp; 312 struct cnode *cp = VTOC(vp); 313 int flag = ap->a_fflag; 314 kauth_cred_t cred = ap->a_cred; 315 /* locals */ 316 int error; 317 318 MARK_ENTRY(CODA_CLOSE_STATS); 319 320 /* Check for close of control file. */ 321 if (IS_CTL_VP(vp)) { 322 MARK_INT_SAT(CODA_CLOSE_STATS); 323 return(0); 324 } 325 326 /* 327 * XXX The IS_UNMOUNTING part of this is very suspect. 328 */ 329 if (IS_UNMOUNTING(cp)) { 330 if (cp->c_ovp) { 331 #ifdef CODA_VERBOSE 332 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n", 333 __func__, vrefcnt(vp), cp->c_ovp, vp, cp); 334 #endif 335 #ifdef hmm 336 vgone(cp->c_ovp); 337 #else 338 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 339 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */ 340 vput(cp->c_ovp); 341 #endif 342 } else { 343 #ifdef CODA_VERBOSE 344 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp); 345 #endif 346 } 347 return ENODEV; 348 } 349 350 /* Lock the container node, and VOP_CLOSE it. */ 351 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 352 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */ 353 /* 354 * Drop the lock we just obtained, and vrele the container vnode. 355 * Decrement reference counts, and clear container vnode pointer on 356 * last close. 357 */ 358 vput(cp->c_ovp); 359 if (flag & FWRITE) 360 --cp->c_owrite; 361 if (--cp->c_ocount == 0) 362 cp->c_ovp = NULL; 363 364 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp); 365 366 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); ) 367 return(error); 368 } 369 370 int 371 coda_read(void *v) 372 { 373 struct vop_read_args *ap = v; 374 375 ENTRY; 376 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ, 377 ap->a_ioflag, ap->a_cred, curlwp)); 378 } 379 380 int 381 coda_write(void *v) 382 { 383 struct vop_write_args *ap = v; 384 385 ENTRY; 386 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE, 387 ap->a_ioflag, ap->a_cred, curlwp)); 388 } 389 390 int 391 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag, 392 kauth_cred_t cred, struct lwp *l) 393 { 394 /* upcall decl */ 395 /* NOTE: container file operation!!! */ 396 /* locals */ 397 struct cnode *cp = VTOC(vp); 398 vnode_t *cfvp = cp->c_ovp; 399 struct proc *p = l->l_proc; 400 int opened_internally = 0; 401 int error = 0; 402 403 MARK_ENTRY(CODA_RDWR_STATS); 404 405 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw, 406 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 407 (long long) uiop->uio_offset)); ) 408 409 /* Check for rdwr of control object. */ 410 if (IS_CTL_VP(vp)) { 411 MARK_INT_FAIL(CODA_RDWR_STATS); 412 return(EINVAL); 413 } 414 415 /* Redirect the request to UFS. 
*/ 416 417 /* 418 * If file is not already open this must be a page 419 * {read,write} request. Iget the cache file's inode 420 * pointer if we still have its <device, inode> pair. 421 * Otherwise, we must do an internal open to derive the 422 * pair. 423 * XXX Integrate this into a coherent strategy for container 424 * file acquisition. 425 */ 426 if (cfvp == NULL) { 427 /* 428 * If we're dumping core, do the internal open. Otherwise 429 * venus won't have the correct size of the core when 430 * it's completely written. 431 */ 432 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) { 433 #ifdef CODA_VERBOSE 434 printf("%s: grabbing container vnode, losing reference\n", 435 __func__); 436 #endif 437 /* Get locked and refed vnode. */ 438 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp); 439 if (error) { 440 MARK_INT_FAIL(CODA_RDWR_STATS); 441 return(error); 442 } 443 /* 444 * Drop lock. 445 * XXX Where is reference released. 446 */ 447 VOP_UNLOCK(cfvp); 448 } 449 else { 450 #ifdef CODA_VERBOSE 451 printf("%s: internal VOP_OPEN\n", __func__); 452 #endif 453 opened_internally = 1; 454 MARK_INT_GEN(CODA_OPEN_STATS); 455 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred); 456 #ifdef CODA_VERBOSE 457 printf("%s: Internally Opening %p\n", __func__, vp); 458 #endif 459 if (error) { 460 MARK_INT_FAIL(CODA_RDWR_STATS); 461 return(error); 462 } 463 cfvp = cp->c_ovp; 464 } 465 } 466 467 /* Have UFS handle the call. */ 468 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__, 469 coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)))); ) 470 471 if (rw == UIO_READ) { 472 error = VOP_READ(cfvp, uiop, ioflag, cred); 473 } else { 474 error = VOP_WRITE(cfvp, uiop, ioflag, cred); 475 } 476 477 if (error) 478 MARK_INT_FAIL(CODA_RDWR_STATS); 479 else 480 MARK_INT_SAT(CODA_RDWR_STATS); 481 482 /* Do an internal close if necessary. */ 483 if (opened_internally) { 484 MARK_INT_GEN(CODA_CLOSE_STATS); 485 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred); 486 } 487 488 /* Invalidate cached attributes if writing. */ 489 if (rw == UIO_WRITE) 490 cp->c_flags &= ~C_VATTR; 491 return(error); 492 } 493 494 int 495 coda_ioctl(void *v) 496 { 497 /* true args */ 498 struct vop_ioctl_args *ap = v; 499 vnode_t *vp = ap->a_vp; 500 int com = ap->a_command; 501 void *data = ap->a_data; 502 int flag = ap->a_fflag; 503 kauth_cred_t cred = ap->a_cred; 504 /* locals */ 505 int error; 506 vnode_t *tvp; 507 struct PioctlData *iap = (struct PioctlData *)data; 508 namei_simple_flags_t sflags; 509 510 MARK_ENTRY(CODA_IOCTL_STATS); 511 512 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));) 513 514 /* Don't check for operation on a dying object, for ctlvp it 515 shouldn't matter */ 516 517 /* Must be control object to succeed. */ 518 if (!IS_CTL_VP(vp)) { 519 MARK_INT_FAIL(CODA_IOCTL_STATS); 520 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));) 521 return (EOPNOTSUPP); 522 } 523 /* Look up the pathname. */ 524 525 /* Should we use the name cache here? It would get it from 526 lookupname sooner or later anyway, right? */ 527 528 sflags = iap->follow ? 
NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT; 529 error = namei_simple_user(iap->path, sflags, &tvp); 530 531 if (error) { 532 MARK_INT_FAIL(CODA_IOCTL_STATS); 533 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n", 534 __func__, error));) 535 return(error); 536 } 537 538 /* 539 * Make sure this is a coda style cnode, but it may be a 540 * different vfsp 541 */ 542 /* XXX: this totally violates the comment about vtagtype in vnode.h */ 543 if (tvp->v_tag != VT_CODA) { 544 vrele(tvp); 545 MARK_INT_FAIL(CODA_IOCTL_STATS); 546 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n", 547 __func__, iap->path));) 548 return(EINVAL); 549 } 550 551 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) { 552 vrele(tvp); 553 return(EINVAL); 554 } 555 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data, 556 cred, curlwp); 557 558 if (error) 559 MARK_INT_FAIL(CODA_IOCTL_STATS); 560 else 561 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); ) 562 563 vrele(tvp); 564 return(error); 565 } 566 567 /* 568 * To reduce the cost of a user-level venus;we cache attributes in 569 * the kernel. Each cnode has storage allocated for an attribute. If 570 * c_vattr is valid, return a reference to it. Otherwise, get the 571 * attributes from venus and store them in the cnode. There is some 572 * question if this method is a security leak. But I think that in 573 * order to make this call, the user must have done a lookup and 574 * opened the file, and therefore should already have access. 575 */ 576 int 577 coda_getattr(void *v) 578 { 579 /* true args */ 580 struct vop_getattr_args *ap = v; 581 vnode_t *vp = ap->a_vp; 582 struct cnode *cp = VTOC(vp); 583 struct vattr *vap = ap->a_vap; 584 kauth_cred_t cred = ap->a_cred; 585 /* locals */ 586 int error; 587 588 MARK_ENTRY(CODA_GETATTR_STATS); 589 590 /* Check for getattr of control object. */ 591 if (IS_CTL_VP(vp)) { 592 MARK_INT_FAIL(CODA_GETATTR_STATS); 593 return(ENOENT); 594 } 595 596 /* Check to see if the attributes have already been cached */ 597 if (VALID_VATTR(cp)) { 598 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n", 599 __func__, coda_f2s(&cp->c_fid)));}) 600 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) 601 coda_print_vattr(&cp->c_vattr); ) 602 603 *vap = cp->c_vattr; 604 MARK_INT_SAT(CODA_GETATTR_STATS); 605 return(0); 606 } 607 608 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap); 609 610 if (!error) { 611 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n", 612 __func__, coda_f2s(&cp->c_fid), error)); ) 613 614 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) 615 coda_print_vattr(vap); ) 616 617 /* If not open for write, store attributes in cnode */ 618 if ((cp->c_owrite == 0) && (coda_attr_cache)) { 619 cp->c_vattr = *vap; 620 cp->c_flags |= C_VATTR; 621 } 622 623 } 624 return(error); 625 } 626 627 int 628 coda_setattr(void *v) 629 { 630 /* true args */ 631 struct vop_setattr_args *ap = v; 632 vnode_t *vp = ap->a_vp; 633 struct cnode *cp = VTOC(vp); 634 struct vattr *vap = ap->a_vap; 635 kauth_cred_t cred = ap->a_cred; 636 /* locals */ 637 int error; 638 639 MARK_ENTRY(CODA_SETATTR_STATS); 640 641 /* Check for setattr of control object. 
*/ 642 if (IS_CTL_VP(vp)) { 643 MARK_INT_FAIL(CODA_SETATTR_STATS); 644 return(ENOENT); 645 } 646 647 if (codadebug & CODADBGMSK(CODA_SETATTR)) { 648 coda_print_vattr(vap); 649 } 650 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp); 651 652 if (!error) 653 cp->c_flags &= ~C_VATTR; 654 655 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); ) 656 return(error); 657 } 658 659 int 660 coda_access(void *v) 661 { 662 /* true args */ 663 struct vop_access_args *ap = v; 664 vnode_t *vp = ap->a_vp; 665 struct cnode *cp = VTOC(vp); 666 accmode_t accmode = ap->a_accmode; 667 kauth_cred_t cred = ap->a_cred; 668 /* locals */ 669 int error; 670 671 MARK_ENTRY(CODA_ACCESS_STATS); 672 673 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0); 674 /* Check for access of control object. Only read access is 675 allowed on it. */ 676 if (IS_CTL_VP(vp)) { 677 /* bogus hack - all will be marked as successes */ 678 MARK_INT_SAT(CODA_ACCESS_STATS); 679 return(((accmode & VREAD) && !(accmode & (VWRITE | VEXEC))) 680 ? 0 : EACCES); 681 } 682 683 /* 684 * if the file is a directory, and we are checking exec (eg lookup) 685 * access, and the file is in the namecache, then the user must have 686 * lookup access to it. 687 */ 688 if (coda_access_cache) { 689 if ((vp->v_type == VDIR) && (accmode & VEXEC)) { 690 if (coda_nc_lookup(cp, ".", 1, cred)) { 691 MARK_INT_SAT(CODA_ACCESS_STATS); 692 return(0); /* it was in the cache */ 693 } 694 } 695 } 696 697 error = venus_access(vtomi(vp), &cp->c_fid, accmode, cred, curlwp); 698 699 return(error); 700 } 701 702 /* 703 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually 704 * done. If a buffer has been saved in anticipation of a coda_create or 705 * a coda_remove, delete it. 706 */ 707 /* ARGSUSED */ 708 int 709 coda_abortop(void *v) 710 { 711 /* true args */ 712 struct vop_abortop_args /* { 713 vnode_t *a_dvp; 714 struct componentname *a_cnp; 715 } */ *ap = v; 716 717 (void)ap; 718 /* upcall decl */ 719 /* locals */ 720 721 return (0); 722 } 723 724 int 725 coda_readlink(void *v) 726 { 727 /* true args */ 728 struct vop_readlink_args *ap = v; 729 vnode_t *vp = ap->a_vp; 730 struct cnode *cp = VTOC(vp); 731 struct uio *uiop = ap->a_uio; 732 kauth_cred_t cred = ap->a_cred; 733 /* locals */ 734 struct lwp *l = curlwp; 735 int error; 736 char *str; 737 int len; 738 739 MARK_ENTRY(CODA_READLINK_STATS); 740 741 /* Check for readlink of control object. 
 */
    if (IS_CTL_VP(vp)) {
	MARK_INT_FAIL(CODA_READLINK_STATS);
	return(ENOENT);
    }

    if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {	/* symlink was cached */
	uiop->uio_rw = UIO_READ;
	error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
	if (error)
	    MARK_INT_FAIL(CODA_READLINK_STATS);
	else
	    MARK_INT_SAT(CODA_READLINK_STATS);
	return(error);
    }

    error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);

    if (!error) {
	uiop->uio_rw = UIO_READ;
	error = uiomove(str, len, uiop);

	if (coda_symlink_cache) {
	    cp->c_symlink = str;
	    cp->c_symlen = len;
	    cp->c_flags |= C_SYMLINK;
	} else
	    CODA_FREE(str, len);
    }

    CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n", error));)
    return(error);
}

int
coda_fsync(void *v)
{
/* true args */
    struct vop_fsync_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred = ap->a_cred;
/* locals */
    vnode_t *convp = cp->c_ovp;
    int error;

    MARK_ENTRY(CODA_FSYNC_STATS);

    /* Check for fsync on an unmounting object */
    /* The NetBSD kernel, in its infinite wisdom, can try to fsync
     * after an unmount has been initiated.  This is a Bad Thing,
     * which we have to avoid.  Not a legitimate failure for stats.
     */
    if (IS_UNMOUNTING(cp)) {
	return(ENODEV);
    }

    /* Check for fsync of control object or uninitialized cnode. */
    if (IS_CTL_VP(vp) || vp->v_type == VNON) {
	MARK_INT_SAT(CODA_FSYNC_STATS);
	return(0);
    }

    if (convp)
	VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);

    /*
     * We can expect fsync on any vnode at all if venus is purging it.
     * Venus can't very well answer the fsync request, now can it?
     * Hopefully, it won't have to, because hopefully, venus preserves
     * the (possibly untrue) invariant that it never purges an open
     * vnode.  Hopefully.
     */
    if (cp->c_flags & C_PURGING) {
	return(0);
    }

    error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);

    CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n", error)); )
    return(error);
}

/*
 * vp is locked on entry, and we must unlock it.
 * XXX This routine is suspect and probably needs rewriting.
 */
int
coda_inactive(void *v)
{
/* true args */
    struct vop_inactive_v2_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred __unused = NULL;

    /* We don't need to send inactive to venus - DCS */
    MARK_ENTRY(CODA_INACTIVE_STATS);

    if (IS_CTL_VP(vp)) {
	MARK_INT_SAT(CODA_INACTIVE_STATS);
	return 0;
    }

    CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
	coda_f2s(&cp->c_fid), vp->v_mount));)

    if (vp->v_mount->mnt_data == NULL) {
	myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
	panic("badness in coda_inactive");
    }

#ifdef CODA_VERBOSE
    /* Sanity checks that perhaps should be panic. */
    if (vrefcnt(vp) > 1)
	printf("%s: %p usecount %d\n", __func__, vp, vrefcnt(vp));
    if (cp->c_ovp != NULL)
	printf("%s: %p ovp != NULL\n", __func__, vp);
#endif
    /* XXX Do we need to VOP_CLOSE container vnodes? */
    if (!IS_UNMOUNTING(cp))
	*ap->a_recycle = true;

    MARK_INT_SAT(CODA_INACTIVE_STATS);
    return(0);
}

/*
 * Coda does not use the normal namecache, but a private version.
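 * (The private cache is the coda_nc_* interface from coda_namecache.h;
 * see the coda_nc_lookup/coda_nc_enter calls below.)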
870 * Consider how to use the standard facility instead. 871 */ 872 int 873 coda_lookup(void *v) 874 { 875 /* true args */ 876 struct vop_lookup_v2_args *ap = v; 877 /* (locked) vnode of dir in which to do lookup */ 878 vnode_t *dvp = ap->a_dvp; 879 struct cnode *dcp = VTOC(dvp); 880 /* output variable for result */ 881 vnode_t **vpp = ap->a_vpp; 882 /* name to lookup */ 883 struct componentname *cnp = ap->a_cnp; 884 kauth_cred_t cred = cnp->cn_cred; 885 struct lwp *l = curlwp; 886 /* locals */ 887 struct cnode *cp; 888 const char *nm = cnp->cn_nameptr; 889 int len = cnp->cn_namelen; 890 CodaFid VFid; 891 int vtype; 892 int error = 0; 893 894 MARK_ENTRY(CODA_LOOKUP_STATS); 895 896 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__, 897 nm, coda_f2s(&dcp->c_fid)));) 898 899 /* 900 * XXX componentname flags in MODMASK are not handled at all 901 */ 902 903 /* 904 * The overall strategy is to switch on the lookup type and get a 905 * result vnode that is vref'd but not locked. 906 */ 907 908 /* Check for lookup of control object. */ 909 if (IS_CTL_NAME(dvp, nm, len)) { 910 *vpp = coda_ctlvp; 911 vref(*vpp); 912 MARK_INT_SAT(CODA_LOOKUP_STATS); 913 goto exit; 914 } 915 916 /* Avoid trying to hand venus an unreasonably long name. */ 917 if (len+1 > CODA_MAXNAMLEN) { 918 MARK_INT_FAIL(CODA_LOOKUP_STATS); 919 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long:, %s (%s)\n", 920 __func__, coda_f2s(&dcp->c_fid), nm));) 921 *vpp = (vnode_t *)0; 922 error = EINVAL; 923 goto exit; 924 } 925 926 /* 927 * Try to resolve the lookup in the minicache. If that fails, ask 928 * venus to do the lookup. XXX The interaction between vnode 929 * locking and any locking that coda does is not clear. 930 */ 931 cp = coda_nc_lookup(dcp, nm, len, cred); 932 if (cp) { 933 *vpp = CTOV(cp); 934 vref(*vpp); 935 CODADEBUG(CODA_LOOKUP, 936 myprintf(("lookup result %d vpp %p\n",error,*vpp));) 937 } else { 938 /* The name wasn't cached, so ask Venus. */ 939 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, 940 &vtype); 941 942 if (error) { 943 MARK_INT_FAIL(CODA_LOOKUP_STATS); 944 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n", 945 __func__, coda_f2s(&dcp->c_fid), nm, error));) 946 *vpp = (vnode_t *)0; 947 } else { 948 MARK_INT_SAT(CODA_LOOKUP_STATS); 949 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n", 950 __func__, coda_f2s(&VFid), vtype, error)); ) 951 952 cp = make_coda_node(&VFid, dvp->v_mount, vtype); 953 *vpp = CTOV(cp); 954 /* vpp is now vrefed. */ 955 956 /* 957 * Unless this vnode is marked CODA_NOCACHE, enter it into 958 * the coda name cache to avoid a future venus round-trip. 959 * XXX Interaction with componentname NOCACHE is unclear. 960 */ 961 if (!(vtype & CODA_NOCACHE)) 962 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 963 } 964 } 965 966 exit: 967 /* 968 * If we are creating, and this was the last name to be looked up, 969 * and the error was ENOENT, then make the leaf NULL and return 970 * success. 971 * XXX Check against new lookup rules. 
972 */ 973 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) 974 && (cnp->cn_flags & ISLASTCN) 975 && (error == ENOENT)) 976 { 977 error = EJUSTRETURN; 978 *ap->a_vpp = NULL; 979 } 980 981 return(error); 982 } 983 984 /*ARGSUSED*/ 985 int 986 coda_create(void *v) 987 { 988 /* true args */ 989 struct vop_create_v3_args *ap = v; 990 vnode_t *dvp = ap->a_dvp; 991 struct cnode *dcp = VTOC(dvp); 992 struct vattr *va = ap->a_vap; 993 int exclusive = 1; 994 int mode = ap->a_vap->va_mode; 995 vnode_t **vpp = ap->a_vpp; 996 struct componentname *cnp = ap->a_cnp; 997 kauth_cred_t cred = cnp->cn_cred; 998 struct lwp *l = curlwp; 999 /* locals */ 1000 int error; 1001 struct cnode *cp; 1002 const char *nm = cnp->cn_nameptr; 1003 int len = cnp->cn_namelen; 1004 CodaFid VFid; 1005 struct vattr attr; 1006 1007 MARK_ENTRY(CODA_CREATE_STATS); 1008 1009 /* All creates are exclusive XXX */ 1010 /* I'm assuming the 'mode' argument is the file mode bits XXX */ 1011 1012 /* Check for create of control object. */ 1013 if (IS_CTL_NAME(dvp, nm, len)) { 1014 *vpp = (vnode_t *)0; 1015 MARK_INT_FAIL(CODA_CREATE_STATS); 1016 return(EACCES); 1017 } 1018 1019 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr); 1020 1021 if (!error) { 1022 1023 /* 1024 * XXX Violation of venus/kernel invariants is a difficult case, 1025 * but venus should not be able to cause a panic. 1026 */ 1027 /* If this is an exclusive create, panic if the file already exists. */ 1028 /* Venus should have detected the file and reported EEXIST. */ 1029 1030 if ((exclusive == 1) && 1031 (coda_find(&VFid) != NULL)) 1032 panic("cnode existed for newly created file!"); 1033 1034 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type); 1035 *vpp = CTOV(cp); 1036 1037 /* XXX vnodeops doesn't say this argument can be changed. */ 1038 /* Update va to reflect the new attributes. */ 1039 (*va) = attr; 1040 1041 /* Update the attribute cache and mark it as valid */ 1042 if (coda_attr_cache) { 1043 VTOC(*vpp)->c_vattr = attr; 1044 VTOC(*vpp)->c_flags |= C_VATTR; 1045 } 1046 1047 /* Invalidate parent's attr cache (modification time has changed). */ 1048 VTOC(dvp)->c_flags &= ~C_VATTR; 1049 1050 /* enter the new vnode in the Name Cache */ 1051 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 1052 1053 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__, 1054 coda_f2s(&VFid), error)); ) 1055 } else { 1056 *vpp = (vnode_t *)0; 1057 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__, 1058 error));) 1059 } 1060 1061 if (!error) { 1062 #ifdef CODA_VERBOSE 1063 if ((cnp->cn_flags & LOCKLEAF) == 0) 1064 /* This should not happen; flags are for lookup only. 
*/ 1065 printf("%s: LOCKLEAF not set!\n", __func__); 1066 #endif 1067 } 1068 1069 return(error); 1070 } 1071 1072 int 1073 coda_remove(void *v) 1074 { 1075 /* true args */ 1076 struct vop_remove_v2_args *ap = v; 1077 vnode_t *dvp = ap->a_dvp; 1078 struct cnode *cp = VTOC(dvp); 1079 vnode_t *vp = ap->a_vp; 1080 struct componentname *cnp = ap->a_cnp; 1081 kauth_cred_t cred = cnp->cn_cred; 1082 struct lwp *l = curlwp; 1083 /* locals */ 1084 int error; 1085 const char *nm = cnp->cn_nameptr; 1086 int len = cnp->cn_namelen; 1087 struct cnode *tp; 1088 1089 MARK_ENTRY(CODA_REMOVE_STATS); 1090 1091 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__, 1092 nm, coda_f2s(&cp->c_fid)));) 1093 1094 /* Remove the file's entry from the CODA Name Cache */ 1095 /* We're being conservative here, it might be that this person 1096 * doesn't really have sufficient access to delete the file 1097 * but we feel zapping the entry won't really hurt anyone -- dcs 1098 */ 1099 /* I'm gonna go out on a limb here. If a file and a hardlink to it 1100 * exist, and one is removed, the link count on the other will be 1101 * off by 1. We could either invalidate the attrs if cached, or 1102 * fix them. I'll try to fix them. DCS 11/8/94 1103 */ 1104 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred); 1105 if (tp) { 1106 if (VALID_VATTR(tp)) { /* If attrs are cached */ 1107 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */ 1108 tp->c_vattr.va_nlink--; 1109 } 1110 } 1111 1112 coda_nc_zapfile(VTOC(dvp), nm, len); 1113 /* No need to flush it if it doesn't exist! */ 1114 } 1115 /* Invalidate the parent's attr cache, the modification time has changed */ 1116 VTOC(dvp)->c_flags &= ~C_VATTR; 1117 1118 /* Check for remove of control object. */ 1119 if (IS_CTL_NAME(dvp, nm, len)) { 1120 MARK_INT_FAIL(CODA_REMOVE_STATS); 1121 return(ENOENT); 1122 } 1123 1124 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l); 1125 1126 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); ) 1127 1128 /* 1129 * Unlock and release child (avoiding double if "."). 1130 */ 1131 if (dvp == vp) { 1132 vrele(vp); 1133 } else { 1134 vput(vp); 1135 } 1136 1137 return(error); 1138 } 1139 1140 /* 1141 * dvp is the directory where the link is to go, and is locked. 1142 * vp is the object to be linked to, and is unlocked. 1143 * At exit, we must unlock dvp, and vput dvp. 1144 */ 1145 int 1146 coda_link(void *v) 1147 { 1148 /* true args */ 1149 struct vop_link_v2_args *ap = v; 1150 vnode_t *vp = ap->a_vp; 1151 struct cnode *cp = VTOC(vp); 1152 vnode_t *dvp = ap->a_dvp; 1153 struct cnode *dcp = VTOC(dvp); 1154 struct componentname *cnp = ap->a_cnp; 1155 kauth_cred_t cred = cnp->cn_cred; 1156 struct lwp *l = curlwp; 1157 /* locals */ 1158 int error; 1159 const char *nm = cnp->cn_nameptr; 1160 int len = cnp->cn_namelen; 1161 1162 MARK_ENTRY(CODA_LINK_STATS); 1163 1164 if (codadebug & CODADBGMSK(CODA_LINK)) { 1165 1166 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid))); 1167 myprintf(("%s: dvp fid: %s)\n", __func__, coda_f2s(&dcp->c_fid))); 1168 1169 } 1170 if (codadebug & CODADBGMSK(CODA_LINK)) { 1171 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid))); 1172 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid))); 1173 1174 } 1175 1176 /* Check for link to/from control object. */ 1177 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) { 1178 MARK_INT_FAIL(CODA_LINK_STATS); 1179 return(EACCES); 1180 } 1181 1182 /* If linking . to a name, error out earlier. 
 */
    if (vp == dvp) {
#ifdef CODA_VERBOSE
	printf("%s: vp == dvp\n", __func__);
#endif
	error = EISDIR;
	goto exit;
    }

    /* XXX Why does venus_link need the vnode to be locked? */
    if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
#ifdef CODA_VERBOSE
	printf("%s: couldn't lock vnode %p\n", __func__, vp);
#endif
	error = EFAULT;		/* XXX better value */
	goto exit;
    }
    error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
    VOP_UNLOCK(vp);

    /* Invalidate parent's attr cache (the modification time has changed). */
    VTOC(dvp)->c_flags &= ~C_VATTR;
    /* Invalidate child's attr cache (XXX why). */
    VTOC(vp)->c_flags &= ~C_VATTR;

    CODADEBUG(CODA_LINK, myprintf(("in link result %d\n", error)); )

exit:
    return(error);
}

int
coda_rename(void *v)
{
/* true args */
    struct vop_rename_args *ap = v;
    vnode_t *odvp = ap->a_fdvp;
    struct cnode *odcp = VTOC(odvp);
    struct componentname *fcnp = ap->a_fcnp;
    vnode_t *ndvp = ap->a_tdvp;
    struct cnode *ndcp = VTOC(ndvp);
    struct componentname *tcnp = ap->a_tcnp;
    kauth_cred_t cred = fcnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    const char *fnm = fcnp->cn_nameptr;
    int flen = fcnp->cn_namelen;
    const char *tnm = tcnp->cn_nameptr;
    int tlen = tcnp->cn_namelen;

    MARK_ENTRY(CODA_RENAME_STATS);

    /* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
       This could be Bad. XXX */
#ifdef OLD_DIAGNOSTIC
    if ((fcnp->cn_cred != tcnp->cn_cred)
	|| (fcnp->cn_lwp != tcnp->cn_lwp))
    {
	panic("%s: component names don't agree", __func__);
    }
#endif

    /* Check for rename involving control object. */
    if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	return(EACCES);
    }

    /* Problem with moving directories -- need to flush entry for .. */
    if (odvp != ndvp) {
	struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
	if (ovcp) {
	    vnode_t *ovp = CTOV(ovcp);
	    if ((ovp) &&
		(ovp->v_type == VDIR)) /* If it's a directory */
		coda_nc_zapfile(VTOC(ovp), "..", 2);
	}
    }

    /* Remove the entries for both source and target files */
    coda_nc_zapfile(VTOC(odvp), fnm, flen);
    coda_nc_zapfile(VTOC(ndvp), tnm, tlen);

    /* Invalidate the parent's attr cache, the modification time has changed */
    VTOC(odvp)->c_flags &= ~C_VATTR;
    VTOC(ndvp)->c_flags &= ~C_VATTR;

    if (flen+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	error = EINVAL;
	goto exit;
    }

    if (tlen+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_RENAME_STATS);
	error = EINVAL;
	goto exit;
    }

    error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);

exit:
    CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n", error));)
    /* XXX - do we need to call cache purge on the moved vnode? */
    cache_purge(ap->a_fvp);

    /* It seems to be incumbent on us to drop locks on all four vnodes */
    /* From-vnodes are not locked, only ref'd.  To-vnodes are locked.
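     * (Below: fvp and fdvp are vrele'd; tvp is vput if distinct from
     * tdvp, otherwise vrele'd; tdvp is vput.)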
*/ 1291 1292 vrele(ap->a_fvp); 1293 vrele(odvp); 1294 1295 if (ap->a_tvp) { 1296 if (ap->a_tvp == ndvp) { 1297 vrele(ap->a_tvp); 1298 } else { 1299 vput(ap->a_tvp); 1300 } 1301 } 1302 1303 vput(ndvp); 1304 return(error); 1305 } 1306 1307 int 1308 coda_mkdir(void *v) 1309 { 1310 /* true args */ 1311 struct vop_mkdir_v3_args *ap = v; 1312 vnode_t *dvp = ap->a_dvp; 1313 struct cnode *dcp = VTOC(dvp); 1314 struct componentname *cnp = ap->a_cnp; 1315 struct vattr *va = ap->a_vap; 1316 vnode_t **vpp = ap->a_vpp; 1317 kauth_cred_t cred = cnp->cn_cred; 1318 struct lwp *l = curlwp; 1319 /* locals */ 1320 int error; 1321 const char *nm = cnp->cn_nameptr; 1322 int len = cnp->cn_namelen; 1323 struct cnode *cp; 1324 CodaFid VFid; 1325 struct vattr ova; 1326 1327 MARK_ENTRY(CODA_MKDIR_STATS); 1328 1329 /* Check for mkdir of target object. */ 1330 if (IS_CTL_NAME(dvp, nm, len)) { 1331 *vpp = (vnode_t *)0; 1332 MARK_INT_FAIL(CODA_MKDIR_STATS); 1333 return(EACCES); 1334 } 1335 1336 if (len+1 > CODA_MAXNAMLEN) { 1337 *vpp = (vnode_t *)0; 1338 MARK_INT_FAIL(CODA_MKDIR_STATS); 1339 return(EACCES); 1340 } 1341 1342 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova); 1343 1344 if (!error) { 1345 if (coda_find(&VFid) != NULL) 1346 panic("cnode existed for newly created directory!"); 1347 1348 1349 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type); 1350 *vpp = CTOV(cp); 1351 1352 /* enter the new vnode in the Name Cache */ 1353 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 1354 1355 /* as a side effect, enter "." and ".." for the directory */ 1356 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp)); 1357 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp)); 1358 1359 if (coda_attr_cache) { 1360 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */ 1361 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */ 1362 } 1363 1364 /* Invalidate the parent's attr cache, the modification time has changed */ 1365 VTOC(dvp)->c_flags &= ~C_VATTR; 1366 1367 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__, 1368 coda_f2s(&VFid), error)); ) 1369 } else { 1370 *vpp = (vnode_t *)0; 1371 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));) 1372 } 1373 1374 return(error); 1375 } 1376 1377 int 1378 coda_rmdir(void *v) 1379 { 1380 /* true args */ 1381 struct vop_rmdir_v2_args *ap = v; 1382 vnode_t *dvp = ap->a_dvp; 1383 struct cnode *dcp = VTOC(dvp); 1384 vnode_t *vp = ap->a_vp; 1385 struct componentname *cnp = ap->a_cnp; 1386 kauth_cred_t cred = cnp->cn_cred; 1387 struct lwp *l = curlwp; 1388 /* true args */ 1389 int error; 1390 const char *nm = cnp->cn_nameptr; 1391 int len = cnp->cn_namelen; 1392 struct cnode *cp; 1393 1394 MARK_ENTRY(CODA_RMDIR_STATS); 1395 1396 /* Check for rmdir of control object. */ 1397 if (IS_CTL_NAME(dvp, nm, len)) { 1398 MARK_INT_FAIL(CODA_RMDIR_STATS); 1399 return(ENOENT); 1400 } 1401 1402 /* Can't remove . in self. */ 1403 if (dvp == vp) { 1404 #ifdef CODA_VERBOSE 1405 printf("%s: dvp == vp\n", __func__); 1406 #endif 1407 error = EINVAL; 1408 goto exit; 1409 } 1410 1411 /* 1412 * The caller may not have adequate permissions, and the venus 1413 * operation may fail, but it doesn't hurt from a correctness 1414 * viewpoint to invalidate cache entries. 1415 * XXX Why isn't this done after the venus_rmdir call? 1416 */ 1417 /* Look up child in name cache (by name, from parent). */ 1418 cp = coda_nc_lookup(dcp, nm, len, cred); 1419 /* If found, remove all children of the child (., ..). 
 */
    if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);

    /* Remove child's own entry. */
    coda_nc_zapfile(dcp, nm, len);

    /* Invalidate parent's attr cache (the modification time has changed). */
    dcp->c_flags &= ~C_VATTR;

    error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);

    CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )

exit:
    /* unlock and release child */
    if (dvp == vp) {
	vrele(vp);
    } else {
	vput(vp);
    }

    return(error);
}

int
coda_symlink(void *v)
{
/* true args */
    struct vop_symlink_v3_args *ap = v;
    vnode_t *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    /* a_vpp is used in place below */
    struct componentname *cnp = ap->a_cnp;
    struct vattr *tva = ap->a_vap;
    char *path = ap->a_target;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
/* locals */
    int error;
    u_long saved_cn_flags;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    int plen = strlen(path);

    /*
     * Here's the strategy for the moment: perform the symlink, then
     * do a lookup to grab the resulting vnode.  I know this requires
     * two communications with Venus for a new symbolic link, but
     * that's the way the ball bounces.  I don't yet want to change
     * the way the Mach symlink works.  When Mach support is
     * deprecated, we should change symlink so that the common case
     * returns the resultant vnode in a vpp argument.
     */

    MARK_ENTRY(CODA_SYMLINK_STATS);

    /* Check for symlink of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EACCES;
	goto exit;
    }

    if (plen+1 > CODA_MAXPATHLEN) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EINVAL;
	goto exit;
    }

    if (len+1 > CODA_MAXNAMLEN) {
	MARK_INT_FAIL(CODA_SYMLINK_STATS);
	error = EINVAL;
	goto exit;
    }

    error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);

    /* Invalidate the parent's attr cache (modification time has changed). */
    dcp->c_flags &= ~C_VATTR;

    if (!error) {
	/*
	 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
	 * these are defined only for VOP_LOOKUP.  We desire to reuse
	 * cnp for a VOP_LOOKUP operation, and must be sure not to pass
	 * on the stray flags passed to us.  Such stray flags can occur
	 * because sys_symlink makes a namei call and then reuses the
	 * componentname structure.
	 */
	/*
	 * XXX Arguably we should create our own componentname structure
	 * and not reuse the one that was passed in.
	 */
	saved_cn_flags = cnp->cn_flags;
	cnp->cn_flags &= ~(MODMASK | OPMASK);
	cnp->cn_flags |= LOOKUP;
	error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
	cnp->cn_flags = saved_cn_flags;
    }

exit:
    CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n", error)); )
    return(error);
}

/*
 * Read directory entries.
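 * The request is redirected to the container file, doing an internal
 * VOP_OPEN first if the directory is not already open.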
1526 */ 1527 int 1528 coda_readdir(void *v) 1529 { 1530 /* true args */ 1531 struct vop_readdir_args *ap = v; 1532 vnode_t *vp = ap->a_vp; 1533 struct cnode *cp = VTOC(vp); 1534 struct uio *uiop = ap->a_uio; 1535 kauth_cred_t cred = ap->a_cred; 1536 int *eofflag = ap->a_eofflag; 1537 off_t **cookies = ap->a_cookies; 1538 int *ncookies = ap->a_ncookies; 1539 /* upcall decl */ 1540 /* locals */ 1541 int error = 0; 1542 enum vtype saved_type; 1543 1544 MARK_ENTRY(CODA_READDIR_STATS); 1545 1546 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__, 1547 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 1548 (long long) uiop->uio_offset)); ) 1549 1550 /* Check for readdir of control object. */ 1551 if (IS_CTL_VP(vp)) { 1552 MARK_INT_FAIL(CODA_READDIR_STATS); 1553 return(ENOENT); 1554 } 1555 1556 { 1557 /* Redirect the request to UFS. */ 1558 1559 /* If directory is not already open do an "internal open" on it. */ 1560 int opened_internally = 0; 1561 if (cp->c_ovp == NULL) { 1562 opened_internally = 1; 1563 MARK_INT_GEN(CODA_OPEN_STATS); 1564 error = VOP_OPEN(vp, FREAD, cred); 1565 #ifdef CODA_VERBOSE 1566 printf("%s: Internally Opening %p\n", __func__, vp); 1567 #endif 1568 if (error) return(error); 1569 } else 1570 vp = cp->c_ovp; 1571 1572 /* Have UFS handle the call. */ 1573 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n", 1574 __func__, coda_f2s(&cp->c_fid), vrefcnt(vp))); ) 1575 saved_type = vp->v_type; 1576 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1577 vp->v_type = VDIR; /* pretend the container file is a dir */ 1578 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies); 1579 vp->v_type = saved_type; 1580 VOP_UNLOCK(vp); 1581 1582 if (error) 1583 MARK_INT_FAIL(CODA_READDIR_STATS); 1584 else 1585 MARK_INT_SAT(CODA_READDIR_STATS); 1586 1587 /* Do an "internal close" if necessary. */ 1588 if (opened_internally) { 1589 MARK_INT_GEN(CODA_CLOSE_STATS); 1590 (void)VOP_CLOSE(vp, FREAD, cred); 1591 } 1592 } 1593 1594 return(error); 1595 } 1596 1597 /* 1598 * Convert from file system blocks to device blocks 1599 */ 1600 int 1601 coda_bmap(void *v) 1602 { 1603 /* XXX on the global proc */ 1604 /* true args */ 1605 struct vop_bmap_args *ap = v; 1606 vnode_t *vp __unused = ap->a_vp; /* file's vnode */ 1607 daddr_t bn __unused = ap->a_bn; /* fs block number */ 1608 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */ 1609 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */ 1610 struct lwp *l __unused = curlwp; 1611 /* upcall decl */ 1612 /* locals */ 1613 1614 *vpp = (vnode_t *)0; 1615 myprintf(("coda_bmap called!\n")); 1616 return(EINVAL); 1617 } 1618 1619 /* 1620 * I don't think the following two things are used anywhere, so I've 1621 * commented them out 1622 * 1623 * struct buf *async_bufhead; 1624 * int async_daemon_count; 1625 */ 1626 int 1627 coda_strategy(void *v) 1628 { 1629 /* true args */ 1630 struct vop_strategy_args *ap = v; 1631 struct buf *bp __unused = ap->a_bp; 1632 struct lwp *l __unused = curlwp; 1633 /* upcall decl */ 1634 /* locals */ 1635 1636 myprintf(("coda_strategy called! ")); 1637 return(EINVAL); 1638 } 1639 1640 int 1641 coda_reclaim(void *v) 1642 { 1643 /* true args */ 1644 struct vop_reclaim_v2_args *ap = v; 1645 vnode_t *vp = ap->a_vp; 1646 struct cnode *cp = VTOC(vp); 1647 /* upcall decl */ 1648 /* locals */ 1649 1650 VOP_UNLOCK(vp); 1651 1652 /* 1653 * Forced unmount/flush will let vnodes with non zero use be destroyed! 
1654 */ 1655 ENTRY; 1656 1657 if (IS_UNMOUNTING(cp)) { 1658 #ifdef DEBUG 1659 if (VTOC(vp)->c_ovp) { 1660 if (IS_UNMOUNTING(cp)) 1661 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp); 1662 } 1663 #endif 1664 } else { 1665 #ifdef OLD_DIAGNOSTIC 1666 if (vrefcnt(vp) != 0) 1667 print("%s: pushing active %p\n", __func__, vp); 1668 if (VTOC(vp)->c_ovp) { 1669 panic("%s: c_ovp not void", __func__); 1670 } 1671 #endif 1672 } 1673 /* If an array has been allocated to hold the symlink, deallocate it */ 1674 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { 1675 if (cp->c_symlink == NULL) 1676 panic("%s: null symlink pointer in cnode", __func__); 1677 1678 CODA_FREE(cp->c_symlink, cp->c_symlen); 1679 cp->c_flags &= ~C_SYMLINK; 1680 cp->c_symlen = 0; 1681 } 1682 1683 mutex_enter(vp->v_interlock); 1684 mutex_enter(&cp->c_lock); 1685 SET_VTOC(vp) = NULL; 1686 mutex_exit(&cp->c_lock); 1687 mutex_exit(vp->v_interlock); 1688 mutex_destroy(&cp->c_lock); 1689 kmem_free(cp, sizeof(*cp)); 1690 1691 return (0); 1692 } 1693 1694 int 1695 coda_lock(void *v) 1696 { 1697 /* true args */ 1698 struct vop_lock_args *ap = v; 1699 vnode_t *vp = ap->a_vp; 1700 struct cnode *cp = VTOC(vp); 1701 /* upcall decl */ 1702 /* locals */ 1703 1704 ENTRY; 1705 1706 if (coda_lockdebug) { 1707 myprintf(("Attempting lock on %s\n", 1708 coda_f2s(&cp->c_fid))); 1709 } 1710 1711 return genfs_lock(v); 1712 } 1713 1714 int 1715 coda_unlock(void *v) 1716 { 1717 /* true args */ 1718 struct vop_unlock_args *ap = v; 1719 vnode_t *vp = ap->a_vp; 1720 struct cnode *cp = VTOC(vp); 1721 /* upcall decl */ 1722 /* locals */ 1723 1724 ENTRY; 1725 if (coda_lockdebug) { 1726 myprintf(("Attempting unlock on %s\n", 1727 coda_f2s(&cp->c_fid))); 1728 } 1729 1730 return genfs_unlock(v); 1731 } 1732 1733 int 1734 coda_islocked(void *v) 1735 { 1736 /* true args */ 1737 ENTRY; 1738 1739 return genfs_islocked(v); 1740 } 1741 1742 /* 1743 * Given a device and inode, obtain a locked vnode. One reference is 1744 * obtained and passed back to the caller. 1745 */ 1746 int 1747 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp) 1748 { 1749 int error; 1750 struct mount *mp; 1751 1752 /* Obtain mount point structure from device. */ 1753 if (!(mp = devtomp(dev))) { 1754 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__, 1755 (unsigned long long)dev)); 1756 return(ENXIO); 1757 } 1758 1759 /* 1760 * Obtain vnode from mount point and inode. 
1761 */ 1762 error = VFS_VGET(mp, ino, LK_EXCLUSIVE, vpp); 1763 if (error) { 1764 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__, 1765 (unsigned long long)dev, (unsigned long long)ino, *vpp, error)); 1766 return(ENOENT); 1767 } 1768 /* share the underlying vnode lock with the coda vnode */ 1769 vshareilock(*vpp, uvp); 1770 KASSERT(VOP_ISLOCKED(*vpp)); 1771 return(0); 1772 } 1773 1774 static void 1775 coda_print_vattr(struct vattr *attr) 1776 { 1777 const char *typestr; 1778 1779 switch (attr->va_type) { 1780 case VNON: 1781 typestr = "VNON"; 1782 break; 1783 case VREG: 1784 typestr = "VREG"; 1785 break; 1786 case VDIR: 1787 typestr = "VDIR"; 1788 break; 1789 case VBLK: 1790 typestr = "VBLK"; 1791 break; 1792 case VCHR: 1793 typestr = "VCHR"; 1794 break; 1795 case VLNK: 1796 typestr = "VLNK"; 1797 break; 1798 case VSOCK: 1799 typestr = "VSCK"; 1800 break; 1801 case VFIFO: 1802 typestr = "VFFO"; 1803 break; 1804 case VBAD: 1805 typestr = "VBAD"; 1806 break; 1807 default: 1808 typestr = "????"; 1809 break; 1810 } 1811 1812 1813 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n", 1814 typestr, (int)attr->va_mode, (int)attr->va_uid, 1815 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev)); 1816 1817 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n", 1818 (int)attr->va_fileid, (int)attr->va_nlink, 1819 (int)attr->va_size, 1820 (int)attr->va_blocksize,(int)attr->va_bytes)); 1821 myprintf((" gen %ld flags %ld vaflags %d\n", 1822 attr->va_gen, attr->va_flags, attr->va_vaflags)); 1823 myprintf((" atime sec %d nsec %d\n", 1824 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec)); 1825 myprintf((" mtime sec %d nsec %d\n", 1826 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec)); 1827 myprintf((" ctime sec %d nsec %d\n", 1828 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec)); 1829 } 1830 1831 /* 1832 * Return a vnode for the given fid. 1833 * If no cnode exists for this fid create one and put it 1834 * in a table hashed by coda_f2i(). If the cnode for 1835 * this fid is already in the table return it (ref count is 1836 * incremented by coda_find. The cnode will be flushed from the 1837 * table when coda_inactive calls coda_unsave. 1838 */ 1839 struct cnode * 1840 make_coda_node(CodaFid *fid, struct mount *fvsp, short type) 1841 { 1842 int error __diagused; 1843 struct vnode *vp; 1844 struct cnode *cp; 1845 1846 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp); 1847 KASSERT(error == 0); 1848 1849 mutex_enter(vp->v_interlock); 1850 cp = VTOC(vp); 1851 KASSERT(cp != NULL); 1852 mutex_enter(&cp->c_lock); 1853 mutex_exit(vp->v_interlock); 1854 1855 if (vp->v_type != type) { 1856 if (vp->v_type == VCHR || vp->v_type == VBLK) 1857 spec_node_destroy(vp); 1858 vp->v_type = type; 1859 if (type == VCHR || type == VBLK) 1860 spec_node_init(vp, NODEV); 1861 uvm_vnp_setsize(vp, 0); 1862 } 1863 mutex_exit(&cp->c_lock); 1864 1865 return cp; 1866 } 1867 1868 /* 1869 * coda_getpages may be called on a vnode which has not been opened, 1870 * e.g. to fault in pages to execute a program. In that case, we must 1871 * open the file to get the container. The vnode may or may not be 1872 * locked, and we must leave it in the same state. 
1873 */ 1874 int 1875 coda_getpages(void *v) 1876 { 1877 struct vop_getpages_args /* { 1878 vnode_t *a_vp; 1879 voff_t a_offset; 1880 struct vm_page **a_m; 1881 int *a_count; 1882 int a_centeridx; 1883 vm_prot_t a_access_type; 1884 int a_advice; 1885 int a_flags; 1886 } */ *ap = v; 1887 vnode_t *vp = ap->a_vp, *cvp; 1888 struct cnode *cp = VTOC(vp); 1889 struct lwp *l = curlwp; 1890 kauth_cred_t cred = l->l_cred; 1891 int error, cerror; 1892 int waslocked; /* 1 if vnode lock was held on entry */ 1893 int didopen = 0; /* 1 if we opened container file */ 1894 krw_t op; 1895 1896 /* 1897 * Handle a case that uvm_fault doesn't quite use yet. 1898 * See layer_vnops.c. for inspiration. 1899 */ 1900 if (ap->a_flags & PGO_LOCKED) { 1901 return EBUSY; 1902 } 1903 1904 KASSERT(rw_lock_held(vp->v_uobj.vmobjlock)); 1905 1906 /* Check for control object. */ 1907 if (IS_CTL_VP(vp)) { 1908 #ifdef CODA_VERBOSE 1909 printf("%s: control object %p\n", __func__, vp); 1910 #endif 1911 return(EINVAL); 1912 } 1913 1914 /* 1915 * XXX It's really not ok to be releasing the lock we get, 1916 * because we could be overlapping with another call to 1917 * getpages and drop a lock they are relying on. We need to 1918 * figure out whether getpages ever is called holding the 1919 * lock, and if we should serialize getpages calls by some 1920 * mechanism. 1921 */ 1922 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */ 1923 op = rw_lock_op(vp->v_uobj.vmobjlock); 1924 waslocked = VOP_ISLOCKED(vp); 1925 1926 /* Get container file if not already present. */ 1927 cvp = cp->c_ovp; 1928 if (cvp == NULL) { 1929 /* 1930 * VOP_OPEN requires a locked vnode. We must avoid 1931 * locking the vnode if it is already locked, and 1932 * leave it in the same state on exit. 1933 */ 1934 if (waslocked == 0) { 1935 rw_exit(vp->v_uobj.vmobjlock); 1936 cerror = vn_lock(vp, LK_EXCLUSIVE); 1937 if (cerror) { 1938 #ifdef CODA_VERBOSE 1939 printf("%s: can't lock vnode %p\n", 1940 __func__, vp); 1941 #endif 1942 return cerror; 1943 } 1944 #ifdef CODA_VERBOSE 1945 printf("%s: locked vnode %p\n", __func__, vp); 1946 #endif 1947 } 1948 1949 /* 1950 * Open file (causes upcall to venus). 1951 * XXX Perhaps we should not fully open the file, but 1952 * simply obtain a container file. 1953 */ 1954 /* XXX Is it ok to do this while holding the mutex? */ 1955 cerror = VOP_OPEN(vp, FREAD, cred); 1956 1957 if (cerror) { 1958 #ifdef CODA_VERBOSE 1959 printf("%s: cannot open vnode %p => %d\n", __func__, 1960 vp, cerror); 1961 #endif 1962 if (waslocked == 0) 1963 VOP_UNLOCK(vp); 1964 return cerror; 1965 } 1966 1967 #ifdef CODA_VERBOSE 1968 printf("%s: opened vnode %p\n", __func__, vp); 1969 #endif 1970 cvp = cp->c_ovp; 1971 didopen = 1; 1972 if (waslocked == 0) 1973 rw_enter(vp->v_uobj.vmobjlock, op); 1974 } 1975 KASSERT(cvp != NULL); 1976 1977 /* Munge the arg structure to refer to the container vnode. */ 1978 KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock); 1979 ap->a_vp = cp->c_ovp; 1980 1981 /* Finally, call getpages on it. */ 1982 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap); 1983 1984 /* If we opened the vnode, we must close it. */ 1985 if (didopen) { 1986 /* 1987 * VOP_CLOSE requires a locked vnode, but we are still 1988 * holding the lock (or riding a caller's lock). 1989 */ 1990 cerror = VOP_CLOSE(vp, FREAD, cred); 1991 #ifdef CODA_VERBOSE 1992 if (cerror != 0) 1993 /* XXX How should we handle this? */ 1994 printf("%s: closed vnode %p -> %d\n", __func__, 1995 vp, cerror); 1996 #endif 1997 1998 /* If we obtained a lock, drop it. 
*/ 1999 if (waslocked == 0) 2000 VOP_UNLOCK(vp); 2001 } 2002 2003 return error; 2004 } 2005 2006 /* 2007 * The protocol requires v_interlock to be held by the caller. 2008 */ 2009 int 2010 coda_putpages(void *v) 2011 { 2012 struct vop_putpages_args /* { 2013 vnode_t *a_vp; 2014 voff_t a_offlo; 2015 voff_t a_offhi; 2016 int a_flags; 2017 } */ *ap = v; 2018 vnode_t *vp = ap->a_vp, *cvp; 2019 struct cnode *cp = VTOC(vp); 2020 int error; 2021 2022 KASSERT(rw_write_held(vp->v_uobj.vmobjlock)); 2023 2024 /* Check for control object. */ 2025 if (IS_CTL_VP(vp)) { 2026 rw_exit(vp->v_uobj.vmobjlock); 2027 #ifdef CODA_VERBOSE 2028 printf("%s: control object %p\n", __func__, vp); 2029 #endif 2030 return 0; 2031 } 2032 2033 /* 2034 * If container object is not present, then there are no pages 2035 * to put; just return without error. This happens all the 2036 * time, apparently during discard of a closed vnode (which 2037 * trivially can't have dirty pages). 2038 */ 2039 cvp = cp->c_ovp; 2040 if (cvp == NULL) { 2041 rw_exit(vp->v_uobj.vmobjlock); 2042 return 0; 2043 } 2044 2045 /* Munge the arg structure to refer to the container vnode. */ 2046 KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock); 2047 ap->a_vp = cvp; 2048 2049 /* Finally, call putpages on it. */ 2050 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap); 2051 2052 return error; 2053 } 2054