1 /* $NetBSD: coda_vnops.c,v 1.114 2020/11/14 11:42:56 hannken Exp $ */ 2 3 /* 4 * 5 * Coda: an Experimental Distributed File System 6 * Release 3.1 7 * 8 * Copyright (c) 1987-1998 Carnegie Mellon University 9 * All Rights Reserved 10 * 11 * Permission to use, copy, modify and distribute this software and its 12 * documentation is hereby granted, provided that both the copyright 13 * notice and this permission notice appear in all copies of the 14 * software, derivative works or modified versions, and any portions 15 * thereof, and that both notices appear in supporting documentation, and 16 * that credit is given to Carnegie Mellon University in all documents 17 * and publicity pertaining to direct or indirect use of this code or its 18 * derivatives. 19 * 20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, 21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS 22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON 23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF 25 * ANY DERIVATIVE WORK. 26 * 27 * Carnegie Mellon encourages users of this software to return any 28 * improvements or extensions that they make, and to grant Carnegie 29 * Mellon the rights to redistribute these changes without encumbrance. 30 * 31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $ 32 */ 33 34 /* 35 * Mach Operating System 36 * Copyright (c) 1990 Carnegie-Mellon University 37 * Copyright (c) 1989 Carnegie-Mellon University 38 * All rights reserved. The CMU software License Agreement specifies 39 * the terms and conditions for use and redistribution. 40 */ 41 42 /* 43 * This code was written for the Coda file system at Carnegie Mellon 44 * University. Contributers include David Steere, James Kistler, and 45 * M. Satyanarayanan. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.114 2020/11/14 11:42:56 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/kauth.h>
#include <sys/dirent.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_vnops.h>
#include <coda/coda_venus.h>
#include <coda/coda_opstats.h>
#include <coda/coda_subr.h>
#include <coda/coda_namecache.h>
#include <coda/coda_pioctl.h>

/*
 * These flags select various performance enhancements.
 */
int coda_attr_cache = 1;    /* Set to cache attributes in the kernel */
int coda_symlink_cache = 1; /* Set to cache symbolic link information */
int coda_access_cache = 1;  /* Set to handle some access checks directly */

/* structure to keep track of vfs calls */

struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];

/* Per-operation counters: entry, satisfied/unsatisfied/generated internally. */
#define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
#define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
#define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
#define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)

/* What we are delaying for in printf */
static int coda_lockdebug = 0;

#define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))

/* Definition of the vnode operation vector */

const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
    { &vop_default_desc, coda_vop_error },
    { &vop_lookup_desc, coda_lookup },          /* lookup */
    { &vop_create_desc, coda_create },          /* create */
    { &vop_mknod_desc, coda_vop_error },        /* mknod */
    { &vop_open_desc, coda_open },              /* open */
    { &vop_close_desc, coda_close },            /* close */
    { &vop_access_desc, coda_access },          /* access */
    { &vop_accessx_desc, genfs_accessx },       /* access */
    { &vop_getattr_desc, coda_getattr },        /* getattr */
    { &vop_setattr_desc, coda_setattr },        /* setattr */
    { &vop_read_desc, coda_read },              /* read */
    { &vop_write_desc, coda_write },            /* write */
    { &vop_fallocate_desc, genfs_eopnotsupp },  /* fallocate */
    { &vop_fdiscard_desc, genfs_eopnotsupp },   /* fdiscard */
    { &vop_fcntl_desc, genfs_fcntl },           /* fcntl */
    { &vop_ioctl_desc, coda_ioctl },            /* ioctl */
    { &vop_mmap_desc, genfs_mmap },             /* mmap */
    { &vop_fsync_desc, coda_fsync },            /* fsync */
    { &vop_remove_desc, coda_remove },          /* remove */
    { &vop_link_desc, coda_link },              /* link */
    { &vop_rename_desc, coda_rename },          /* rename */
    { &vop_mkdir_desc, coda_mkdir },            /* mkdir */
    { &vop_rmdir_desc, coda_rmdir },            /* rmdir */
    { &vop_symlink_desc, coda_symlink },        /* symlink */
    { &vop_readdir_desc, coda_readdir },        /* readdir */
    { &vop_readlink_desc, coda_readlink },      /* readlink */
    { &vop_abortop_desc, coda_abortop },        /* abortop */
    { &vop_inactive_desc, coda_inactive },      /* inactive */
    { &vop_reclaim_desc, coda_reclaim },        /* reclaim */
    { &vop_lock_desc, coda_lock },              /* lock */
    { &vop_unlock_desc, coda_unlock },          /* unlock */
    { &vop_bmap_desc, coda_bmap },              /* bmap */
    { &vop_strategy_desc, coda_strategy },      /* strategy */
    { &vop_print_desc, coda_vop_error },        /* print */
    { &vop_islocked_desc, coda_islocked },      /* islocked */
    { &vop_pathconf_desc, coda_pathconf },      /* pathconf */
    { &vop_advlock_desc, coda_vop_nop },        /* advlock */
    { &vop_bwrite_desc, coda_vop_error },       /* bwrite */
    { &vop_seek_desc, genfs_seek },             /* seek */
    { &vop_poll_desc, genfs_poll },             /* poll */
    { &vop_getpages_desc, coda_getpages },      /* getpages */
    { &vop_putpages_desc, coda_putpages },      /* putpages */
    { NULL, NULL }
};

/* Debug helper: dump a struct vattr (used by getattr/setattr tracing). */
static void coda_print_vattr(struct vattr *);

int (**coda_vnodeop_p)(void *);
const struct vnodeopv_desc coda_vnodeop_opv_desc =
    { &coda_vnodeop_p, coda_vnodeop_entries };

/* Definitions of NetBSD vnodeop interfaces */

/*
 * A generic error routine.  Return EIO without looking at arguments.
 */
int
coda_vop_error(void *anon) {
    struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;

    if (codadebug) {
        myprintf(("%s: Vnode operation %s called (error).\n",
            __func__, (*desc)->vdesc_name));
    }

    return EIO;
}

/* A generic do-nothing.  Reports the operation when debugging, returns 0. */
int
coda_vop_nop(void *anon) {
    struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;

    if (codadebug) {
        myprintf(("Vnode operation %s called, but unsupported\n",
            (*desc)->vdesc_name));
    }
    return (0);
}

/* Reset the per-operation statistics table to all-zero counters. */
int
coda_vnodeopstats_init(void)
{
    int i;

    for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
        coda_vnodeopstats[i].opcode = i;
        coda_vnodeopstats[i].entries = 0;
        coda_vnodeopstats[i].sat_intrn = 0;
        coda_vnodeopstats[i].unsat_intrn = 0;
        coda_vnodeopstats[i].gen_intrn = 0;
    }

    return 0;
}

/*
 * XXX The entire relationship between VOP_OPEN and having a container
 * file (via venus_open) needs to be reexamined.  In particular, it's
 * valid to open/mmap/close and then reference.  Instead of doing
 * VOP_OPEN when getpages needs a container, we should do the
 * venus_open part, and record that the vnode has opened the container
 * for getpages, and do the matching logical close on coda_inactive.
 * Further, coda_rdwr needs a container file, and sometimes needs to
 * do the equivalent of open (core dumps).
 */
/*
 * coda_open calls Venus to return the device and inode of the
 * container file, and then obtains a vnode for that file.
The 212 * container vnode is stored in the coda vnode, and a reference is 213 * added for each open file. 214 */ 215 int 216 coda_open(void *v) 217 { 218 /* 219 * NetBSD can pass the O_EXCL flag in mode, even though the check 220 * has already happened. Venus defensively assumes that if open 221 * is passed the EXCL, it must be a bug. We strip the flag here. 222 */ 223 /* true args */ 224 struct vop_open_args *ap = v; 225 vnode_t *vp = ap->a_vp; 226 struct cnode *cp = VTOC(vp); 227 int flag = ap->a_mode & (~O_EXCL); 228 kauth_cred_t cred = ap->a_cred; 229 /* locals */ 230 int error; 231 dev_t dev; /* container file device, inode, vnode */ 232 ino_t inode; 233 vnode_t *container_vp; 234 235 MARK_ENTRY(CODA_OPEN_STATS); 236 237 KASSERT(VOP_ISLOCKED(vp)); 238 /* Check for open of control file. */ 239 if (IS_CTL_VP(vp)) { 240 /* if (WRITABLE(flag)) */ 241 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) { 242 MARK_INT_FAIL(CODA_OPEN_STATS); 243 return(EACCES); 244 } 245 MARK_INT_SAT(CODA_OPEN_STATS); 246 return(0); 247 } 248 249 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode); 250 if (error) 251 return (error); 252 if (!error) { 253 CODADEBUG(CODA_OPEN, myprintf(( 254 "%s: dev 0x%llx inode %llu result %d\n", __func__, 255 (unsigned long long)dev, (unsigned long long)inode, error));) 256 } 257 258 /* 259 * Obtain locked and referenced container vnode from container 260 * device/inode. 261 */ 262 error = coda_grab_vnode(vp, dev, inode, &container_vp); 263 if (error) 264 return (error); 265 266 /* Save the vnode pointer for the container file. */ 267 if (cp->c_ovp == NULL) { 268 cp->c_ovp = container_vp; 269 } else { 270 if (cp->c_ovp != container_vp) 271 /* 272 * Perhaps venus returned a different container, or 273 * something else went wrong. 274 */ 275 panic("%s: cp->c_ovp != container_vp", __func__); 276 } 277 cp->c_ocount++; 278 279 /* Flush the attribute cache if writing the file. 
*/ 280 if (flag & FWRITE) { 281 cp->c_owrite++; 282 cp->c_flags &= ~C_VATTR; 283 } 284 285 /* 286 * Save the <device, inode> pair for the container file to speed 287 * up subsequent reads while closed (mmap, program execution). 288 * This is perhaps safe because venus will invalidate the node 289 * before changing the container file mapping. 290 */ 291 cp->c_device = dev; 292 cp->c_inode = inode; 293 294 /* Open the container file. */ 295 error = VOP_OPEN(container_vp, flag, cred); 296 /* 297 * Drop the lock on the container, after we have done VOP_OPEN 298 * (which requires a locked vnode). 299 */ 300 VOP_UNLOCK(container_vp); 301 return(error); 302 } 303 304 /* 305 * Close the cache file used for I/O and notify Venus. 306 */ 307 int 308 coda_close(void *v) 309 { 310 /* true args */ 311 struct vop_close_args *ap = v; 312 vnode_t *vp = ap->a_vp; 313 struct cnode *cp = VTOC(vp); 314 int flag = ap->a_fflag; 315 kauth_cred_t cred = ap->a_cred; 316 /* locals */ 317 int error; 318 319 MARK_ENTRY(CODA_CLOSE_STATS); 320 321 /* Check for close of control file. */ 322 if (IS_CTL_VP(vp)) { 323 MARK_INT_SAT(CODA_CLOSE_STATS); 324 return(0); 325 } 326 327 /* 328 * XXX The IS_UNMOUNTING part of this is very suspect. 329 */ 330 if (IS_UNMOUNTING(cp)) { 331 if (cp->c_ovp) { 332 #ifdef CODA_VERBOSE 333 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n", 334 __func__, vrefcnt(vp), cp->c_ovp, vp, cp); 335 #endif 336 #ifdef hmm 337 vgone(cp->c_ovp); 338 #else 339 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 340 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */ 341 vput(cp->c_ovp); 342 #endif 343 } else { 344 #ifdef CODA_VERBOSE 345 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp); 346 #endif 347 } 348 return ENODEV; 349 } 350 351 /* Lock the container node, and VOP_CLOSE it. */ 352 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY); 353 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? 
*/ 354 /* 355 * Drop the lock we just obtained, and vrele the container vnode. 356 * Decrement reference counts, and clear container vnode pointer on 357 * last close. 358 */ 359 vput(cp->c_ovp); 360 if (flag & FWRITE) 361 --cp->c_owrite; 362 if (--cp->c_ocount == 0) 363 cp->c_ovp = NULL; 364 365 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp); 366 367 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); ) 368 return(error); 369 } 370 371 int 372 coda_read(void *v) 373 { 374 struct vop_read_args *ap = v; 375 376 ENTRY; 377 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ, 378 ap->a_ioflag, ap->a_cred, curlwp)); 379 } 380 381 int 382 coda_write(void *v) 383 { 384 struct vop_write_args *ap = v; 385 386 ENTRY; 387 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE, 388 ap->a_ioflag, ap->a_cred, curlwp)); 389 } 390 391 int 392 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag, 393 kauth_cred_t cred, struct lwp *l) 394 { 395 /* upcall decl */ 396 /* NOTE: container file operation!!! */ 397 /* locals */ 398 struct cnode *cp = VTOC(vp); 399 vnode_t *cfvp = cp->c_ovp; 400 struct proc *p = l->l_proc; 401 int opened_internally = 0; 402 int error = 0; 403 404 MARK_ENTRY(CODA_RDWR_STATS); 405 406 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw, 407 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 408 (long long) uiop->uio_offset)); ) 409 410 /* Check for rdwr of control object. */ 411 if (IS_CTL_VP(vp)) { 412 MARK_INT_FAIL(CODA_RDWR_STATS); 413 return(EINVAL); 414 } 415 416 /* Redirect the request to UFS. */ 417 418 /* 419 * If file is not already open this must be a page 420 * {read,write} request. Iget the cache file's inode 421 * pointer if we still have its <device, inode> pair. 422 * Otherwise, we must do an internal open to derive the 423 * pair. 424 * XXX Integrate this into a coherent strategy for container 425 * file acquisition. 
     */
    if (cfvp == NULL) {
        /*
         * If we're dumping core, do the internal open.  Otherwise
         * venus won't have the correct size of the core when
         * it's completely written.
         */
        if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
#ifdef CODA_VERBOSE
            printf("%s: grabbing container vnode, losing reference\n",
                __func__);
#endif
            /* Get locked and refed vnode. */
            error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
            if (error) {
                MARK_INT_FAIL(CODA_RDWR_STATS);
                return(error);
            }
            /*
             * Drop lock.
             * XXX Where is reference released.
             */
            VOP_UNLOCK(cfvp);
        }
        else {
#ifdef CODA_VERBOSE
            printf("%s: internal VOP_OPEN\n", __func__);
#endif
            opened_internally = 1;
            MARK_INT_GEN(CODA_OPEN_STATS);
            /* VOP_OPEN on the coda vnode itself sets up cp->c_ovp. */
            error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
#ifdef CODA_VERBOSE
            printf("%s: Internally Opening %p\n", __func__, vp);
#endif
            if (error) {
                MARK_INT_FAIL(CODA_RDWR_STATS);
                return(error);
            }
            cfvp = cp->c_ovp;
        }
    }

    /* Have UFS handle the call. */
    CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
        coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)))); )

    if (rw == UIO_READ) {
        error = VOP_READ(cfvp, uiop, ioflag, cred);
    } else {
        error = VOP_WRITE(cfvp, uiop, ioflag, cred);
    }

    if (error)
        MARK_INT_FAIL(CODA_RDWR_STATS);
    else
        MARK_INT_SAT(CODA_RDWR_STATS);

    /* Do an internal close if necessary. */
    if (opened_internally) {
        MARK_INT_GEN(CODA_CLOSE_STATS);
        (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
    }

    /* Invalidate cached attributes if writing. */
    if (rw == UIO_WRITE)
        cp->c_flags &= ~C_VATTR;
    return(error);
}

/*
 * VOP_IOCTL: only meaningful on the Coda control object; forwards a
 * pioctl-style request to Venus for the path named in the request data.
 */
int
coda_ioctl(void *v)
{
    /* true args */
    struct vop_ioctl_args *ap = v;
    vnode_t *vp = ap->a_vp;
    int com = ap->a_command;
    void *data = ap->a_data;
    int flag = ap->a_fflag;
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    int error;
    vnode_t *tvp;
    struct PioctlData *iap = (struct PioctlData *)data;
    namei_simple_flags_t sflags;

    MARK_ENTRY(CODA_IOCTL_STATS);

    CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)

    /* Don't check for operation on a dying object, for ctlvp it
       shouldn't matter */

    /* Must be control object to succeed. */
    if (!IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_IOCTL_STATS);
        CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
        return (EOPNOTSUPP);
    }
    /* Look up the pathname. */

    /* Should we use the name cache here? It would get it from
       lookupname sooner or later anyway, right? */

    sflags = iap->follow ?
        NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
    error = namei_simple_user(iap->path, sflags, &tvp);

    if (error) {
        MARK_INT_FAIL(CODA_IOCTL_STATS);
        CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
            __func__, error));)
        return(error);
    }

    /*
     * Make sure this is a coda style cnode, but it may be a
     * different vfsp
     */
    /* XXX: this totally violates the comment about vtagtype in vnode.h */
    if (tvp->v_tag != VT_CODA) {
        vrele(tvp);
        MARK_INT_FAIL(CODA_IOCTL_STATS);
        CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
            __func__, iap->path));)
        return(EINVAL);
    }

    /* Bound the user-supplied transfer sizes before upcalling. */
    if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
        vrele(tvp);
        return(EINVAL);
    }
    error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
        cred, curlwp);

    if (error)
        MARK_INT_FAIL(CODA_IOCTL_STATS);
    else
        CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )

    vrele(tvp);
    return(error);
}

/*
 * To reduce the cost of a user-level venus;we cache attributes in
 * the kernel.  Each cnode has storage allocated for an attribute. If
 * c_vattr is valid, return a reference to it. Otherwise, get the
 * attributes from venus and store them in the cnode.  There is some
 * question if this method is a security leak. But I think that in
 * order to make this call, the user must have done a lookup and
 * opened the file, and therefore should already have access.
 */
int
coda_getattr(void *v)
{
    /* true args */
    struct vop_getattr_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct vattr *vap = ap->a_vap;
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    int error;

    MARK_ENTRY(CODA_GETATTR_STATS);

    /* Check for getattr of control object. */
    if (IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_GETATTR_STATS);
        return(ENOENT);
    }

    /* Check to see if the attributes have already been cached */
    if (VALID_VATTR(cp)) {
        CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
            __func__, coda_f2s(&cp->c_fid)));})
        CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
            coda_print_vattr(&cp->c_vattr); )

        *vap = cp->c_vattr;
        MARK_INT_SAT(CODA_GETATTR_STATS);
        return(0);
    }

    error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);

    if (!error) {
        CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
            __func__, coda_f2s(&cp->c_fid), error)); )

        CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
            coda_print_vattr(vap); )

        /* If not open for write, store attributes in cnode */
        if ((cp->c_owrite == 0) && (coda_attr_cache)) {
            cp->c_vattr = *vap;
            cp->c_flags |= C_VATTR;
        }

    }
    return(error);
}

/*
 * VOP_SETATTR: pass the new attributes to Venus and invalidate the
 * local attribute cache on success.
 */
int
coda_setattr(void *v)
{
    /* true args */
    struct vop_setattr_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct vattr *vap = ap->a_vap;
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    int error;

    MARK_ENTRY(CODA_SETATTR_STATS);

    /* Check for setattr of control object. */
    if (IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_SETATTR_STATS);
        return(ENOENT);
    }

    if (codadebug & CODADBGMSK(CODA_SETATTR)) {
        coda_print_vattr(vap);
    }
    error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);

    if (!error)
        cp->c_flags &= ~C_VATTR;

    CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
    return(error);
}

/*
 * VOP_ACCESS: answer simple directory-lookup checks from the name
 * cache when allowed; otherwise ask Venus.
 */
int
coda_access(void *v)
{
    /* true args */
    struct vop_access_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    accmode_t accmode = ap->a_accmode;
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    int error;

    MARK_ENTRY(CODA_ACCESS_STATS);

    KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0);
    /* Check for access of control object.  Only read access is
       allowed on it. */
    if (IS_CTL_VP(vp)) {
        /* bogus hack - all will be marked as successes */
        MARK_INT_SAT(CODA_ACCESS_STATS);
        return(((accmode & VREAD) && !(accmode & (VWRITE | VEXEC)))
            ? 0 : EACCES);
    }

    /*
     * if the file is a directory, and we are checking exec (eg lookup)
     * access, and the file is in the namecache, then the user must have
     * lookup access to it.
     */
    if (coda_access_cache) {
        if ((vp->v_type == VDIR) && (accmode & VEXEC)) {
            if (coda_nc_lookup(cp, ".", 1, cred)) {
                MARK_INT_SAT(CODA_ACCESS_STATS);
                return(0);  /* it was in the cache */
            }
        }
    }

    error = venus_access(vtomi(vp), &cp->c_fid, accmode, cred, curlwp);

    return(error);
}

/*
 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
 * done. If a buffer has been saved in anticipation of a coda_create or
 * a coda_remove, delete it.
 */
/* ARGSUSED */
int
coda_abortop(void *v)
{
    /* true args */
    struct vop_abortop_args /* {
        vnode_t *a_dvp;
        struct componentname *a_cnp;
    } */ *ap = v;

    (void)ap;
    /* upcall decl */
    /* locals */

    /* Nothing is saved between lookup and create/remove here, so
       there is nothing to release. */
    return (0);
}

/*
 * VOP_READLINK: return the symlink target, from the per-cnode cache
 * when valid, otherwise from Venus (optionally caching the result).
 */
int
coda_readlink(void *v)
{
    /* true args */
    struct vop_readlink_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct uio *uiop = ap->a_uio;
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    struct lwp *l = curlwp;
    int error;
    char *str;
    int len;

    MARK_ENTRY(CODA_READLINK_STATS);

    /* Check for readlink of control object. */
    if (IS_CTL_VP(vp)) {
        MARK_INT_FAIL(CODA_READLINK_STATS);
        return(ENOENT);
    }

    if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
        uiop->uio_rw = UIO_READ;
        error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
        if (error)
            MARK_INT_FAIL(CODA_READLINK_STATS);
        else
            MARK_INT_SAT(CODA_READLINK_STATS);
        return(error);
    }

    error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);

    if (!error) {
        uiop->uio_rw = UIO_READ;
        error = uiomove(str, len, uiop);

        /* Either keep the Venus-allocated string as the cache, or
           free it now; ownership transfers to the cnode when cached. */
        if (coda_symlink_cache) {
            cp->c_symlink = str;
            cp->c_symlen = len;
            cp->c_flags |= C_SYMLINK;
        } else
            CODA_FREE(str, len);
    }

    CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
    return(error);
}

/*
 * VOP_FSYNC: sync the container file, then notify Venus unless the
 * node is being purged or the volume is unmounting.
 */
int
coda_fsync(void *v)
{
    /* true args */
    struct vop_fsync_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred = ap->a_cred;
    /* locals */
    vnode_t *convp = cp->c_ovp;
    int error;

    MARK_ENTRY(CODA_FSYNC_STATS);

    /* Check for fsync on an unmounting object */
    /* The NetBSD kernel, in its infinite wisdom, can try to fsync
     * after an unmount has been initiated.  This is a Bad Thing,
     * which we have to avoid.  Not a legitimate failure for stats.
     */
    if (IS_UNMOUNTING(cp)) {
        return(ENODEV);
    }

    /* Check for fsync of control object or unitialized cnode. */
    if (IS_CTL_VP(vp) || vp->v_type == VNON) {
        MARK_INT_SAT(CODA_FSYNC_STATS);
        return(0);
    }

    if (convp)
        VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);

    /*
     * We can expect fsync on any vnode at all if venus is pruging it.
     * Venus can't very well answer the fsync request, now can it?
     * Hopefully, it won't have to, because hopefully, venus preserves
     * the (possibly untrue) invariant that it never purges an open
     * vnode.  Hopefully.
     */
    if (cp->c_flags & C_PURGING) {
        return(0);
    }

    error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);

    CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
    return(error);
}

/*
 * vp is locked on entry, and we must unlock it.
 * XXX This routine is suspect and probably needs rewriting.
 */
int
coda_inactive(void *v)
{
    /* true args */
    struct vop_inactive_v2_args *ap = v;
    vnode_t *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    kauth_cred_t cred __unused = NULL;

    /* We don't need to send inactive to venus - DCS */
    MARK_ENTRY(CODA_INACTIVE_STATS);

    if (IS_CTL_VP(vp)) {
        MARK_INT_SAT(CODA_INACTIVE_STATS);
        return 0;
    }

    CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
        coda_f2s(&cp->c_fid), vp->v_mount));)

    if (vp->v_mount->mnt_data == NULL) {
        myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
        panic("badness in coda_inactive");
    }

#ifdef CODA_VERBOSE
    /* Sanity checks that perhaps should be panic.
     */
    if (vrefcnt(vp) > 1)
        printf("%s: %p usecount %d\n", __func__, vp, vrefcnt(vp));
    if (cp->c_ovp != NULL)
        printf("%s: %p ovp != NULL\n", __func__, vp);
#endif
    /* XXX Do we need to VOP_CLOSE container vnodes? */
    if (!IS_UNMOUNTING(cp))
        *ap->a_recycle = true;

    MARK_INT_SAT(CODA_INACTIVE_STATS);
    return(0);
}

/*
 * Coda does not use the normal namecache, but a private version.
 * Consider how to use the standard facility instead.
 */
int
coda_lookup(void *v)
{
    /* true args */
    struct vop_lookup_v2_args *ap = v;
    /* (locked) vnode of dir in which to do lookup */
    vnode_t *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    /* output variable for result */
    vnode_t **vpp = ap->a_vpp;
    /* name to lookup */
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
    /* locals */
    struct cnode *cp;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    CodaFid VFid;
    int vtype;
    int error = 0;

    MARK_ENTRY(CODA_LOOKUP_STATS);

    CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
        nm, coda_f2s(&dcp->c_fid)));)

    /*
     * XXX componentname flags in MODMASK are not handled at all
     */

    /*
     * The overall strategy is to switch on the lookup type and get a
     * result vnode that is vref'd but not locked.
     */

    /* Check for lookup of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
        *vpp = coda_ctlvp;
        vref(*vpp);
        MARK_INT_SAT(CODA_LOOKUP_STATS);
        goto exit;
    }

    /* Avoid trying to hand venus an unreasonably long name. */
    if (len+1 > CODA_MAXNAMLEN) {
        MARK_INT_FAIL(CODA_LOOKUP_STATS);
        CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long:, %s (%s)\n",
            __func__, coda_f2s(&dcp->c_fid), nm));)
        *vpp = (vnode_t *)0;
        error = EINVAL;
        goto exit;
    }

    /*
     * Try to resolve the lookup in the minicache.  If that fails, ask
     * venus to do the lookup.  XXX The interaction between vnode
     * locking and any locking that coda does is not clear.
     */
    cp = coda_nc_lookup(dcp, nm, len, cred);
    if (cp) {
        *vpp = CTOV(cp);
        vref(*vpp);
        CODADEBUG(CODA_LOOKUP,
            myprintf(("lookup result %d vpp %p\n",error,*vpp));)
    } else {
        /* The name wasn't cached, so ask Venus. */
        error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
            &vtype);

        if (error) {
            MARK_INT_FAIL(CODA_LOOKUP_STATS);
            CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
                __func__, coda_f2s(&dcp->c_fid), nm, error));)
            *vpp = (vnode_t *)0;
        } else {
            MARK_INT_SAT(CODA_LOOKUP_STATS);
            CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
                __func__, coda_f2s(&VFid), vtype, error)); )

            cp = make_coda_node(&VFid, dvp->v_mount, vtype);
            *vpp = CTOV(cp);
            /* vpp is now vrefed. */

            /*
             * Unless this vnode is marked CODA_NOCACHE, enter it into
             * the coda name cache to avoid a future venus round-trip.
             * XXX Interaction with componentname NOCACHE is unclear.
             */
            if (!(vtype & CODA_NOCACHE))
                coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
        }
    }

 exit:
    /*
     * If we are creating, and this was the last name to be looked up,
     * and the error was ENOENT, then make the leaf NULL and return
     * success.
     * XXX Check against new lookup rules.
     */
    if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
        && (cnp->cn_flags & ISLASTCN)
        && (error == ENOENT))
    {
        error = EJUSTRETURN;
        *ap->a_vpp = NULL;
    }

    return(error);
}

/*ARGSUSED*/
int
coda_create(void *v)
{
    /* true args */
    struct vop_create_v3_args *ap = v;
    vnode_t *dvp = ap->a_dvp;
    struct cnode *dcp = VTOC(dvp);
    struct vattr *va = ap->a_vap;
    int exclusive = 1;
    int mode = ap->a_vap->va_mode;
    vnode_t **vpp = ap->a_vpp;
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
    /* locals */
    int error;
    struct cnode *cp;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    CodaFid VFid;
    struct vattr attr;

    MARK_ENTRY(CODA_CREATE_STATS);

    /* All creates are exclusive XXX */
    /* I'm assuming the 'mode' argument is the file mode bits XXX */

    /* Check for create of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
        *vpp = (vnode_t *)0;
        MARK_INT_FAIL(CODA_CREATE_STATS);
        return(EACCES);
    }

    error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);

    if (!error) {

        /*
         * XXX Violation of venus/kernel invariants is a difficult case,
         * but venus should not be able to cause a panic.
         */
        /* If this is an exclusive create, panic if the file already exists. */
        /* Venus should have detected the file and reported EEXIST. */

        if ((exclusive == 1) &&
            (coda_find(&VFid) != NULL))
            panic("cnode existed for newly created file!");

        cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
        *vpp = CTOV(cp);

        /* XXX vnodeops doesn't say this argument can be changed. */
        /* Update va to reflect the new attributes.
 */
        (*va) = attr;

        /* Update the attribute cache and mark it as valid */
        if (coda_attr_cache) {
            VTOC(*vpp)->c_vattr = attr;
            VTOC(*vpp)->c_flags |= C_VATTR;
        }

        /* Invalidate parent's attr cache (modification time has changed). */
        VTOC(dvp)->c_flags &= ~C_VATTR;

        /* enter the new vnode in the Name Cache */
        coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));

        CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
            coda_f2s(&VFid), error)); )
    } else {
        *vpp = (vnode_t *)0;
        CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
            error));)
    }

    if (!error) {
#ifdef CODA_VERBOSE
        if ((cnp->cn_flags & LOCKLEAF) == 0)
            /* This should not happen; flags are for lookup only. */
            printf("%s: LOCKLEAF not set!\n", __func__);
#endif
    }

    return(error);
}

/*
 * VOP_REMOVE: fix up the private name cache and attribute caches,
 * then ask Venus to remove the name.
 */
int
coda_remove(void *v)
{
    /* true args */
    struct vop_remove_v2_args *ap = v;
    vnode_t *dvp = ap->a_dvp;
    struct cnode *cp = VTOC(dvp);
    vnode_t *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;
    kauth_cred_t cred = cnp->cn_cred;
    struct lwp *l = curlwp;
    /* locals */
    int error;
    const char *nm = cnp->cn_nameptr;
    int len = cnp->cn_namelen;
    struct cnode *tp;

    MARK_ENTRY(CODA_REMOVE_STATS);

    CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
        nm, coda_f2s(&cp->c_fid)));)

    /* Remove the file's entry from the CODA Name Cache */
    /* We're being conservative here, it might be that this person
     * doesn't really have sufficient access to delete the file
     * but we feel zapping the entry won't really hurt anyone -- dcs
     */
    /* I'm gonna go out on a limb here. If a file and a hardlink to it
     * exist, and one is removed, the link count on the other will be
     * off by 1.  We could either invalidate the attrs if cached, or
     * fix them.  I'll try to fix them.  DCS 11/8/94
     */
    tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
    if (tp) {
        if (VALID_VATTR(tp)) {  /* If attrs are cached */
            if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
                tp->c_vattr.va_nlink--;
            }
        }

        coda_nc_zapfile(VTOC(dvp), nm, len);
        /* No need to flush it if it doesn't exist! */
    }
    /* Invalidate the parent's attr cache, the modification time has changed */
    VTOC(dvp)->c_flags &= ~C_VATTR;

    /* Check for remove of control object. */
    if (IS_CTL_NAME(dvp, nm, len)) {
        MARK_INT_FAIL(CODA_REMOVE_STATS);
        return(ENOENT);
    }

    error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);

    CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )

    /*
     * Unlock and release child (avoiding double if ".").
     */
    if (dvp == vp) {
        vrele(vp);
    } else {
        vput(vp);
    }

    return(error);
}

/*
 * dvp is the directory where the link is to go, and is locked.
 * vp is the object to be linked to, and is unlocked.
 * At exit, we must unlock dvp, and vput dvp.
1145 */ 1146 int 1147 coda_link(void *v) 1148 { 1149 /* true args */ 1150 struct vop_link_v2_args *ap = v; 1151 vnode_t *vp = ap->a_vp; 1152 struct cnode *cp = VTOC(vp); 1153 vnode_t *dvp = ap->a_dvp; 1154 struct cnode *dcp = VTOC(dvp); 1155 struct componentname *cnp = ap->a_cnp; 1156 kauth_cred_t cred = cnp->cn_cred; 1157 struct lwp *l = curlwp; 1158 /* locals */ 1159 int error; 1160 const char *nm = cnp->cn_nameptr; 1161 int len = cnp->cn_namelen; 1162 1163 MARK_ENTRY(CODA_LINK_STATS); 1164 1165 if (codadebug & CODADBGMSK(CODA_LINK)) { 1166 1167 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid))); 1168 myprintf(("%s: dvp fid: %s)\n", __func__, coda_f2s(&dcp->c_fid))); 1169 1170 } 1171 if (codadebug & CODADBGMSK(CODA_LINK)) { 1172 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid))); 1173 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid))); 1174 1175 } 1176 1177 /* Check for link to/from control object. */ 1178 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) { 1179 MARK_INT_FAIL(CODA_LINK_STATS); 1180 return(EACCES); 1181 } 1182 1183 /* If linking . to a name, error out earlier. */ 1184 if (vp == dvp) { 1185 #ifdef CODA_VERBOSE 1186 printf("%s coda_link vp==dvp\n", __func__); 1187 #endif 1188 error = EISDIR; 1189 goto exit; 1190 } 1191 1192 /* XXX Why does venus_link need the vnode to be locked?*/ 1193 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) { 1194 #ifdef CODA_VERBOSE 1195 printf("%s: couldn't lock vnode %p\n", __func__, vp); 1196 #endif 1197 error = EFAULT; /* XXX better value */ 1198 goto exit; 1199 } 1200 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l); 1201 VOP_UNLOCK(vp); 1202 1203 /* Invalidate parent's attr cache (the modification time has changed). */ 1204 VTOC(dvp)->c_flags &= ~C_VATTR; 1205 /* Invalidate child's attr cache (XXX why). 
*/ 1206 VTOC(vp)->c_flags &= ~C_VATTR; 1207 1208 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); ) 1209 1210 exit: 1211 return(error); 1212 } 1213 1214 int 1215 coda_rename(void *v) 1216 { 1217 /* true args */ 1218 struct vop_rename_args *ap = v; 1219 vnode_t *odvp = ap->a_fdvp; 1220 struct cnode *odcp = VTOC(odvp); 1221 struct componentname *fcnp = ap->a_fcnp; 1222 vnode_t *ndvp = ap->a_tdvp; 1223 struct cnode *ndcp = VTOC(ndvp); 1224 struct componentname *tcnp = ap->a_tcnp; 1225 kauth_cred_t cred = fcnp->cn_cred; 1226 struct lwp *l = curlwp; 1227 /* true args */ 1228 int error; 1229 const char *fnm = fcnp->cn_nameptr; 1230 int flen = fcnp->cn_namelen; 1231 const char *tnm = tcnp->cn_nameptr; 1232 int tlen = tcnp->cn_namelen; 1233 1234 MARK_ENTRY(CODA_RENAME_STATS); 1235 1236 /* Hmmm. The vnodes are already looked up. Perhaps they are locked? 1237 This could be Bad. XXX */ 1238 #ifdef OLD_DIAGNOSTIC 1239 if ((fcnp->cn_cred != tcnp->cn_cred) 1240 || (fcnp->cn_lwp != tcnp->cn_lwp)) 1241 { 1242 panic("%s: component names don't agree", __func__); 1243 } 1244 #endif 1245 1246 /* Check for rename involving control object. */ 1247 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) { 1248 MARK_INT_FAIL(CODA_RENAME_STATS); 1249 return(EACCES); 1250 } 1251 1252 /* Problem with moving directories -- need to flush entry for .. 
*/ 1253 if (odvp != ndvp) { 1254 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred); 1255 if (ovcp) { 1256 vnode_t *ovp = CTOV(ovcp); 1257 if ((ovp) && 1258 (ovp->v_type == VDIR)) /* If it's a directory */ 1259 coda_nc_zapfile(VTOC(ovp),"..", 2); 1260 } 1261 } 1262 1263 /* Remove the entries for both source and target files */ 1264 coda_nc_zapfile(VTOC(odvp), fnm, flen); 1265 coda_nc_zapfile(VTOC(ndvp), tnm, tlen); 1266 1267 /* Invalidate the parent's attr cache, the modification time has changed */ 1268 VTOC(odvp)->c_flags &= ~C_VATTR; 1269 VTOC(ndvp)->c_flags &= ~C_VATTR; 1270 1271 if (flen+1 > CODA_MAXNAMLEN) { 1272 MARK_INT_FAIL(CODA_RENAME_STATS); 1273 error = EINVAL; 1274 goto exit; 1275 } 1276 1277 if (tlen+1 > CODA_MAXNAMLEN) { 1278 MARK_INT_FAIL(CODA_RENAME_STATS); 1279 error = EINVAL; 1280 goto exit; 1281 } 1282 1283 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l); 1284 1285 exit: 1286 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));) 1287 /* XXX - do we need to call cache pureg on the moved vnode? */ 1288 cache_purge(ap->a_fvp); 1289 1290 /* It seems to be incumbent on us to drop locks on all four vnodes */ 1291 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. 
*/ 1292 1293 vrele(ap->a_fvp); 1294 vrele(odvp); 1295 1296 if (ap->a_tvp) { 1297 if (ap->a_tvp == ndvp) { 1298 vrele(ap->a_tvp); 1299 } else { 1300 vput(ap->a_tvp); 1301 } 1302 } 1303 1304 vput(ndvp); 1305 return(error); 1306 } 1307 1308 int 1309 coda_mkdir(void *v) 1310 { 1311 /* true args */ 1312 struct vop_mkdir_v3_args *ap = v; 1313 vnode_t *dvp = ap->a_dvp; 1314 struct cnode *dcp = VTOC(dvp); 1315 struct componentname *cnp = ap->a_cnp; 1316 struct vattr *va = ap->a_vap; 1317 vnode_t **vpp = ap->a_vpp; 1318 kauth_cred_t cred = cnp->cn_cred; 1319 struct lwp *l = curlwp; 1320 /* locals */ 1321 int error; 1322 const char *nm = cnp->cn_nameptr; 1323 int len = cnp->cn_namelen; 1324 struct cnode *cp; 1325 CodaFid VFid; 1326 struct vattr ova; 1327 1328 MARK_ENTRY(CODA_MKDIR_STATS); 1329 1330 /* Check for mkdir of target object. */ 1331 if (IS_CTL_NAME(dvp, nm, len)) { 1332 *vpp = (vnode_t *)0; 1333 MARK_INT_FAIL(CODA_MKDIR_STATS); 1334 return(EACCES); 1335 } 1336 1337 if (len+1 > CODA_MAXNAMLEN) { 1338 *vpp = (vnode_t *)0; 1339 MARK_INT_FAIL(CODA_MKDIR_STATS); 1340 return(EACCES); 1341 } 1342 1343 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova); 1344 1345 if (!error) { 1346 if (coda_find(&VFid) != NULL) 1347 panic("cnode existed for newly created directory!"); 1348 1349 1350 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type); 1351 *vpp = CTOV(cp); 1352 1353 /* enter the new vnode in the Name Cache */ 1354 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); 1355 1356 /* as a side effect, enter "." and ".." 
for the directory */ 1357 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp)); 1358 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp)); 1359 1360 if (coda_attr_cache) { 1361 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */ 1362 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */ 1363 } 1364 1365 /* Invalidate the parent's attr cache, the modification time has changed */ 1366 VTOC(dvp)->c_flags &= ~C_VATTR; 1367 1368 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__, 1369 coda_f2s(&VFid), error)); ) 1370 } else { 1371 *vpp = (vnode_t *)0; 1372 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));) 1373 } 1374 1375 return(error); 1376 } 1377 1378 int 1379 coda_rmdir(void *v) 1380 { 1381 /* true args */ 1382 struct vop_rmdir_v2_args *ap = v; 1383 vnode_t *dvp = ap->a_dvp; 1384 struct cnode *dcp = VTOC(dvp); 1385 vnode_t *vp = ap->a_vp; 1386 struct componentname *cnp = ap->a_cnp; 1387 kauth_cred_t cred = cnp->cn_cred; 1388 struct lwp *l = curlwp; 1389 /* true args */ 1390 int error; 1391 const char *nm = cnp->cn_nameptr; 1392 int len = cnp->cn_namelen; 1393 struct cnode *cp; 1394 1395 MARK_ENTRY(CODA_RMDIR_STATS); 1396 1397 /* Check for rmdir of control object. */ 1398 if (IS_CTL_NAME(dvp, nm, len)) { 1399 MARK_INT_FAIL(CODA_RMDIR_STATS); 1400 return(ENOENT); 1401 } 1402 1403 /* Can't remove . in self. */ 1404 if (dvp == vp) { 1405 #ifdef CODA_VERBOSE 1406 printf("%s: dvp == vp\n", __func__); 1407 #endif 1408 error = EINVAL; 1409 goto exit; 1410 } 1411 1412 /* 1413 * The caller may not have adequate permissions, and the venus 1414 * operation may fail, but it doesn't hurt from a correctness 1415 * viewpoint to invalidate cache entries. 1416 * XXX Why isn't this done after the venus_rmdir call? 1417 */ 1418 /* Look up child in name cache (by name, from parent). */ 1419 cp = coda_nc_lookup(dcp, nm, len, cred); 1420 /* If found, remove all children of the child (., ..). 
*/ 1421 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL); 1422 1423 /* Remove child's own entry. */ 1424 coda_nc_zapfile(dcp, nm, len); 1425 1426 /* Invalidate parent's attr cache (the modification time has changed). */ 1427 dcp->c_flags &= ~C_VATTR; 1428 1429 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l); 1430 1431 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); ) 1432 1433 exit: 1434 /* unlock and release child */ 1435 if (dvp == vp) { 1436 vrele(vp); 1437 } else { 1438 vput(vp); 1439 } 1440 1441 return(error); 1442 } 1443 1444 int 1445 coda_symlink(void *v) 1446 { 1447 /* true args */ 1448 struct vop_symlink_v3_args *ap = v; 1449 vnode_t *dvp = ap->a_dvp; 1450 struct cnode *dcp = VTOC(dvp); 1451 /* a_vpp is used in place below */ 1452 struct componentname *cnp = ap->a_cnp; 1453 struct vattr *tva = ap->a_vap; 1454 char *path = ap->a_target; 1455 kauth_cred_t cred = cnp->cn_cred; 1456 struct lwp *l = curlwp; 1457 /* locals */ 1458 int error; 1459 u_long saved_cn_flags; 1460 const char *nm = cnp->cn_nameptr; 1461 int len = cnp->cn_namelen; 1462 int plen = strlen(path); 1463 1464 /* 1465 * Here's the strategy for the moment: perform the symlink, then 1466 * do a lookup to grab the resulting vnode. I know this requires 1467 * two communications with Venus for a new sybolic link, but 1468 * that's the way the ball bounces. I don't yet want to change 1469 * the way the Mach symlink works. When Mach support is 1470 * deprecated, we should change symlink so that the common case 1471 * returns the resultant vnode in a vpp argument. 1472 */ 1473 1474 MARK_ENTRY(CODA_SYMLINK_STATS); 1475 1476 /* Check for symlink of control object. 
*/ 1477 if (IS_CTL_NAME(dvp, nm, len)) { 1478 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1479 error = EACCES; 1480 goto exit; 1481 } 1482 1483 if (plen+1 > CODA_MAXPATHLEN) { 1484 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1485 error = EINVAL; 1486 goto exit; 1487 } 1488 1489 if (len+1 > CODA_MAXNAMLEN) { 1490 MARK_INT_FAIL(CODA_SYMLINK_STATS); 1491 error = EINVAL; 1492 goto exit; 1493 } 1494 1495 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l); 1496 1497 /* Invalidate the parent's attr cache (modification time has changed). */ 1498 dcp->c_flags &= ~C_VATTR; 1499 1500 if (!error) { 1501 /* 1502 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags; 1503 * these are defined only for VOP_LOOKUP. We desire to reuse 1504 * cnp for a VOP_LOOKUP operation, and must be sure to not pass 1505 * stray flags passed to us. Such stray flags can occur because 1506 * sys_symlink makes a namei call and then reuses the 1507 * componentname structure. 1508 */ 1509 /* 1510 * XXX Arguably we should create our own componentname structure 1511 * and not reuse the one that was passed in. 1512 */ 1513 saved_cn_flags = cnp->cn_flags; 1514 cnp->cn_flags &= ~(MODMASK | OPMASK); 1515 cnp->cn_flags |= LOOKUP; 1516 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp); 1517 cnp->cn_flags = saved_cn_flags; 1518 } 1519 1520 exit: 1521 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); ) 1522 return(error); 1523 } 1524 1525 /* 1526 * Read directory entries. 
1527 */ 1528 int 1529 coda_readdir(void *v) 1530 { 1531 /* true args */ 1532 struct vop_readdir_args *ap = v; 1533 vnode_t *vp = ap->a_vp; 1534 struct cnode *cp = VTOC(vp); 1535 struct uio *uiop = ap->a_uio; 1536 kauth_cred_t cred = ap->a_cred; 1537 int *eofflag = ap->a_eofflag; 1538 /* upcall decl */ 1539 /* locals */ 1540 size_t initial_resid = uiop->uio_resid; 1541 int error = 0; 1542 int opened_internally = 0; 1543 int ncookies; 1544 char *buf; 1545 struct vnode *cvp; 1546 struct dirent *dirp; 1547 1548 MARK_ENTRY(CODA_READDIR_STATS); 1549 1550 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__, 1551 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, 1552 (long long) uiop->uio_offset)); ) 1553 1554 /* Check for readdir of control object. */ 1555 if (IS_CTL_VP(vp)) { 1556 MARK_INT_FAIL(CODA_READDIR_STATS); 1557 return ENOENT; 1558 } 1559 1560 /* If directory is not already open do an "internal open" on it. */ 1561 if (cp->c_ovp == NULL) { 1562 opened_internally = 1; 1563 MARK_INT_GEN(CODA_OPEN_STATS); 1564 error = VOP_OPEN(vp, FREAD, cred); 1565 #ifdef CODA_VERBOSE 1566 printf("%s: Internally Opening %p\n", __func__, vp); 1567 #endif 1568 if (error) 1569 return error; 1570 KASSERT(cp->c_ovp != NULL); 1571 } 1572 cvp = cp->c_ovp; 1573 1574 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n", 1575 __func__, coda_f2s(&cp->c_fid), vrefcnt(cvp))); ) 1576 1577 if (ap->a_ncookies) { 1578 ncookies = ap->a_uio->uio_resid / _DIRENT_RECLEN(dirp, 1); 1579 *ap->a_ncookies = 0; 1580 *ap->a_cookies = malloc(ncookies * sizeof (off_t), 1581 M_TEMP, M_WAITOK); 1582 } 1583 buf = kmem_alloc(CODA_DIRBLKSIZ, KM_SLEEP); 1584 dirp = kmem_alloc(sizeof(*dirp), KM_SLEEP); 1585 vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY); 1586 1587 while (error == 0) { 1588 size_t resid = 0; 1589 char *dp, *ep; 1590 1591 if (!ALIGNED_POINTER(uiop->uio_offset, uint32_t)) { 1592 error = EINVAL; 1593 break; 1594 } 1595 error = vn_rdwr(UIO_READ, cvp, buf, 1596 CODA_DIRBLKSIZ, 
uiop->uio_offset, 1597 UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, curlwp); 1598 if (error || resid == CODA_DIRBLKSIZ) 1599 break; 1600 for (dp = buf, ep = dp + CODA_DIRBLKSIZ - resid; dp < ep; ) { 1601 off_t off; 1602 struct venus_dirent *vd = (struct venus_dirent *)dp; 1603 1604 if (!ALIGNED_POINTER(vd, uint32_t) || 1605 !ALIGNED_POINTER(vd->d_reclen, uint32_t) || 1606 vd->d_reclen == 0) { 1607 error = EINVAL; 1608 break; 1609 } 1610 if (dp + vd->d_reclen > ep) { 1611 error = ENAMETOOLONG; 1612 break; 1613 } 1614 if (vd->d_namlen == 0) { 1615 uiop->uio_offset += vd->d_reclen; 1616 dp += vd->d_reclen; 1617 continue; 1618 } 1619 1620 dirp->d_fileno = vd->d_fileno; 1621 dirp->d_type = vd->d_type; 1622 dirp->d_namlen = vd->d_namlen; 1623 dirp->d_reclen = _DIRENT_SIZE(dirp); 1624 strlcpy(dirp->d_name, vd->d_name, dirp->d_namlen + 1); 1625 1626 if (uiop->uio_resid < dirp->d_reclen) { 1627 error = ENAMETOOLONG; 1628 break; 1629 } 1630 1631 off = uiop->uio_offset; 1632 error = uiomove(dirp, dirp->d_reclen, uiop); 1633 uiop->uio_offset = off; 1634 if (error) 1635 break; 1636 1637 uiop->uio_offset += vd->d_reclen; 1638 dp += vd->d_reclen; 1639 if (ap->a_ncookies) 1640 (*ap->a_cookies)[(*ap->a_ncookies)++] = 1641 uiop->uio_offset; 1642 } 1643 } 1644 1645 VOP_UNLOCK(cvp); 1646 kmem_free(dirp, sizeof(*dirp)); 1647 kmem_free(buf, CODA_DIRBLKSIZ); 1648 if (eofflag && error == 0) 1649 *eofflag = 1; 1650 if (uiop->uio_resid < initial_resid && error == ENAMETOOLONG) 1651 error = 0; 1652 if (ap->a_ncookies && error) { 1653 free(*ap->a_cookies, M_TEMP); 1654 *ap->a_ncookies = 0; 1655 *ap->a_cookies = NULL; 1656 } 1657 if (error) 1658 MARK_INT_FAIL(CODA_READDIR_STATS); 1659 else 1660 MARK_INT_SAT(CODA_READDIR_STATS); 1661 1662 /* Do an "internal close" if necessary. 
*/ 1663 if (opened_internally) { 1664 MARK_INT_GEN(CODA_CLOSE_STATS); 1665 (void)VOP_CLOSE(vp, FREAD, cred); 1666 } 1667 1668 return error; 1669 } 1670 1671 /* 1672 * Convert from file system blocks to device blocks 1673 */ 1674 int 1675 coda_bmap(void *v) 1676 { 1677 /* XXX on the global proc */ 1678 /* true args */ 1679 struct vop_bmap_args *ap = v; 1680 vnode_t *vp __unused = ap->a_vp; /* file's vnode */ 1681 daddr_t bn __unused = ap->a_bn; /* fs block number */ 1682 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */ 1683 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */ 1684 struct lwp *l __unused = curlwp; 1685 /* upcall decl */ 1686 /* locals */ 1687 1688 *vpp = (vnode_t *)0; 1689 myprintf(("coda_bmap called!\n")); 1690 return(EINVAL); 1691 } 1692 1693 /* 1694 * I don't think the following two things are used anywhere, so I've 1695 * commented them out 1696 * 1697 * struct buf *async_bufhead; 1698 * int async_daemon_count; 1699 */ 1700 int 1701 coda_strategy(void *v) 1702 { 1703 /* true args */ 1704 struct vop_strategy_args *ap = v; 1705 struct buf *bp __unused = ap->a_bp; 1706 struct lwp *l __unused = curlwp; 1707 /* upcall decl */ 1708 /* locals */ 1709 1710 myprintf(("coda_strategy called! ")); 1711 return(EINVAL); 1712 } 1713 1714 int 1715 coda_reclaim(void *v) 1716 { 1717 /* true args */ 1718 struct vop_reclaim_v2_args *ap = v; 1719 vnode_t *vp = ap->a_vp; 1720 struct cnode *cp = VTOC(vp); 1721 /* upcall decl */ 1722 /* locals */ 1723 1724 VOP_UNLOCK(vp); 1725 1726 /* 1727 * Forced unmount/flush will let vnodes with non zero use be destroyed! 
1728 */ 1729 ENTRY; 1730 1731 if (IS_UNMOUNTING(cp)) { 1732 #ifdef DEBUG 1733 if (VTOC(vp)->c_ovp) { 1734 if (IS_UNMOUNTING(cp)) 1735 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp); 1736 } 1737 #endif 1738 } else { 1739 #ifdef OLD_DIAGNOSTIC 1740 if (vrefcnt(vp) != 0) 1741 print("%s: pushing active %p\n", __func__, vp); 1742 if (VTOC(vp)->c_ovp) { 1743 panic("%s: c_ovp not void", __func__); 1744 } 1745 #endif 1746 } 1747 /* If an array has been allocated to hold the symlink, deallocate it */ 1748 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { 1749 if (cp->c_symlink == NULL) 1750 panic("%s: null symlink pointer in cnode", __func__); 1751 1752 CODA_FREE(cp->c_symlink, cp->c_symlen); 1753 cp->c_flags &= ~C_SYMLINK; 1754 cp->c_symlen = 0; 1755 } 1756 1757 mutex_enter(vp->v_interlock); 1758 mutex_enter(&cp->c_lock); 1759 SET_VTOC(vp) = NULL; 1760 mutex_exit(&cp->c_lock); 1761 mutex_exit(vp->v_interlock); 1762 mutex_destroy(&cp->c_lock); 1763 kmem_free(cp, sizeof(*cp)); 1764 1765 return (0); 1766 } 1767 1768 int 1769 coda_lock(void *v) 1770 { 1771 /* true args */ 1772 struct vop_lock_args *ap = v; 1773 vnode_t *vp = ap->a_vp; 1774 struct cnode *cp = VTOC(vp); 1775 /* upcall decl */ 1776 /* locals */ 1777 1778 ENTRY; 1779 1780 if (coda_lockdebug) { 1781 myprintf(("Attempting lock on %s\n", 1782 coda_f2s(&cp->c_fid))); 1783 } 1784 1785 return genfs_lock(v); 1786 } 1787 1788 int 1789 coda_unlock(void *v) 1790 { 1791 /* true args */ 1792 struct vop_unlock_args *ap = v; 1793 vnode_t *vp = ap->a_vp; 1794 struct cnode *cp = VTOC(vp); 1795 /* upcall decl */ 1796 /* locals */ 1797 1798 ENTRY; 1799 if (coda_lockdebug) { 1800 myprintf(("Attempting unlock on %s\n", 1801 coda_f2s(&cp->c_fid))); 1802 } 1803 1804 return genfs_unlock(v); 1805 } 1806 1807 int 1808 coda_islocked(void *v) 1809 { 1810 /* true args */ 1811 ENTRY; 1812 1813 return genfs_islocked(v); 1814 } 1815 1816 int 1817 coda_pathconf(void *v) 1818 { 1819 struct vop_pathconf_args *ap = v; 1820 1821 
switch (ap->a_name) { 1822 default: 1823 return EINVAL; 1824 } 1825 /* NOTREACHED */ 1826 } 1827 1828 /* 1829 * Given a device and inode, obtain a locked vnode. One reference is 1830 * obtained and passed back to the caller. 1831 */ 1832 int 1833 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp) 1834 { 1835 int error; 1836 struct mount *mp; 1837 1838 /* Obtain mount point structure from device. */ 1839 if (!(mp = devtomp(dev))) { 1840 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__, 1841 (unsigned long long)dev)); 1842 return(ENXIO); 1843 } 1844 1845 /* 1846 * Obtain vnode from mount point and inode. 1847 */ 1848 error = VFS_VGET(mp, ino, LK_EXCLUSIVE, vpp); 1849 if (error) { 1850 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__, 1851 (unsigned long long)dev, (unsigned long long)ino, *vpp, error)); 1852 return(ENOENT); 1853 } 1854 /* share the underlying vnode lock with the coda vnode */ 1855 vshareilock(*vpp, uvp); 1856 KASSERT(VOP_ISLOCKED(*vpp)); 1857 return(0); 1858 } 1859 1860 static void 1861 coda_print_vattr(struct vattr *attr) 1862 { 1863 const char *typestr; 1864 1865 switch (attr->va_type) { 1866 case VNON: 1867 typestr = "VNON"; 1868 break; 1869 case VREG: 1870 typestr = "VREG"; 1871 break; 1872 case VDIR: 1873 typestr = "VDIR"; 1874 break; 1875 case VBLK: 1876 typestr = "VBLK"; 1877 break; 1878 case VCHR: 1879 typestr = "VCHR"; 1880 break; 1881 case VLNK: 1882 typestr = "VLNK"; 1883 break; 1884 case VSOCK: 1885 typestr = "VSCK"; 1886 break; 1887 case VFIFO: 1888 typestr = "VFFO"; 1889 break; 1890 case VBAD: 1891 typestr = "VBAD"; 1892 break; 1893 default: 1894 typestr = "????"; 1895 break; 1896 } 1897 1898 1899 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n", 1900 typestr, (int)attr->va_mode, (int)attr->va_uid, 1901 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev)); 1902 1903 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n", 1904 (int)attr->va_fileid, 
(int)attr->va_nlink, 1905 (int)attr->va_size, 1906 (int)attr->va_blocksize,(int)attr->va_bytes)); 1907 myprintf((" gen %ld flags %ld vaflags %d\n", 1908 attr->va_gen, attr->va_flags, attr->va_vaflags)); 1909 myprintf((" atime sec %d nsec %d\n", 1910 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec)); 1911 myprintf((" mtime sec %d nsec %d\n", 1912 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec)); 1913 myprintf((" ctime sec %d nsec %d\n", 1914 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec)); 1915 } 1916 1917 /* 1918 * Return a vnode for the given fid. 1919 * If no cnode exists for this fid create one and put it 1920 * in a table hashed by coda_f2i(). If the cnode for 1921 * this fid is already in the table return it (ref count is 1922 * incremented by coda_find. The cnode will be flushed from the 1923 * table when coda_inactive calls coda_unsave. 1924 */ 1925 struct cnode * 1926 make_coda_node(CodaFid *fid, struct mount *fvsp, short type) 1927 { 1928 int error __diagused; 1929 struct vnode *vp; 1930 struct cnode *cp; 1931 1932 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp); 1933 KASSERT(error == 0); 1934 1935 mutex_enter(vp->v_interlock); 1936 cp = VTOC(vp); 1937 KASSERT(cp != NULL); 1938 mutex_enter(&cp->c_lock); 1939 mutex_exit(vp->v_interlock); 1940 1941 if (vp->v_type != type) { 1942 if (vp->v_type == VCHR || vp->v_type == VBLK) 1943 spec_node_destroy(vp); 1944 vp->v_type = type; 1945 if (type == VCHR || type == VBLK) 1946 spec_node_init(vp, NODEV); 1947 uvm_vnp_setsize(vp, 0); 1948 } 1949 mutex_exit(&cp->c_lock); 1950 1951 return cp; 1952 } 1953 1954 /* 1955 * coda_getpages may be called on a vnode which has not been opened, 1956 * e.g. to fault in pages to execute a program. In that case, we must 1957 * open the file to get the container. The vnode may or may not be 1958 * locked, and we must leave it in the same state. 
 */
int
coda_getpages(void *v)
{
	struct vop_getpages_args /* {
		vnode_t *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp, *cvp;
	struct cnode *cp = VTOC(vp);
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	int error, cerror;
	int waslocked;	       /* 1 if vnode lock was held on entry */
	int didopen = 0;	/* 1 if we opened container file */
	krw_t op;

	/*
	 * Handle a case that uvm_fault doesn't quite use yet.
	 * See layer_vnops.c. for inspiration.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	/* Check for control object. */
	if (IS_CTL_VP(vp)) {
#ifdef CODA_VERBOSE
		printf("%s: control object %p\n", __func__, vp);
#endif
		return(EINVAL);
	}

	/*
	 * XXX It's really not ok to be releasing the lock we get,
	 * because we could be overlapping with another call to
	 * getpages and drop a lock they are relying on.  We need to
	 * figure out whether getpages ever is called holding the
	 * lock, and if we should serialize getpages calls by some
	 * mechanism.
	 */
	/* XXX VOP_ISLOCKED() may not be used for lock decisions. */
	/* Remember the vmobjlock mode so it can be re-acquired identically. */
	op = rw_lock_op(vp->v_uobj.vmobjlock);
	waslocked = VOP_ISLOCKED(vp);

	/* Get container file if not already present. */
	cvp = cp->c_ovp;
	if (cvp == NULL) {
		/*
		 * VOP_OPEN requires a locked vnode.  We must avoid
		 * locking the vnode if it is already locked, and
		 * leave it in the same state on exit.
		 */
		if (waslocked == 0) {
			/* Drop vmobjlock before sleeping in vn_lock. */
			rw_exit(vp->v_uobj.vmobjlock);
			cerror = vn_lock(vp, LK_EXCLUSIVE);
			if (cerror) {
#ifdef CODA_VERBOSE
				printf("%s: can't lock vnode %p\n",
				    __func__, vp);
#endif
				return cerror;
			}
#ifdef CODA_VERBOSE
			printf("%s: locked vnode %p\n", __func__, vp);
#endif
		}

		/*
		 * Open file (causes upcall to venus).
		 * XXX Perhaps we should not fully open the file, but
		 * simply obtain a container file.
		 */
		/* XXX Is it ok to do this while holding the mutex? */
		cerror = VOP_OPEN(vp, FREAD, cred);

		if (cerror) {
#ifdef CODA_VERBOSE
			printf("%s: cannot open vnode %p => %d\n", __func__,
			    vp, cerror);
#endif
			if (waslocked == 0)
				VOP_UNLOCK(vp);
			return cerror;
		}

#ifdef CODA_VERBOSE
		printf("%s: opened vnode %p\n", __func__, vp);
#endif
		cvp = cp->c_ovp;
		didopen = 1;
		if (waslocked == 0)
			/* Re-acquire vmobjlock in the original mode. */
			rw_enter(vp->v_uobj.vmobjlock, op);
	}
	KASSERT(cvp != NULL);

	/* Munge the arg structure to refer to the container vnode. */
	KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock);
	ap->a_vp = cp->c_ovp;

	/* Finally, call getpages on it. */
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);

	/* If we opened the vnode, we must close it. */
	if (didopen) {
		/*
		 * VOP_CLOSE requires a locked vnode, but we are still
		 * holding the lock (or riding a caller's lock).
		 */
		cerror = VOP_CLOSE(vp, FREAD, cred);
#ifdef CODA_VERBOSE
		if (cerror != 0)
			/* XXX How should we handle this? */
			printf("%s: closed vnode %p -> %d\n", __func__,
			    vp, cerror);
#endif

		/* If we obtained a lock, drop it. */
		if (waslocked == 0)
			VOP_UNLOCK(vp);
	}

	return error;
}

/*
 * The protocol requires v_interlock to be held by the caller.
2094 */ 2095 int 2096 coda_putpages(void *v) 2097 { 2098 struct vop_putpages_args /* { 2099 vnode_t *a_vp; 2100 voff_t a_offlo; 2101 voff_t a_offhi; 2102 int a_flags; 2103 } */ *ap = v; 2104 vnode_t *vp = ap->a_vp, *cvp; 2105 struct cnode *cp = VTOC(vp); 2106 int error; 2107 2108 KASSERT(rw_write_held(vp->v_uobj.vmobjlock)); 2109 2110 /* Check for control object. */ 2111 if (IS_CTL_VP(vp)) { 2112 rw_exit(vp->v_uobj.vmobjlock); 2113 #ifdef CODA_VERBOSE 2114 printf("%s: control object %p\n", __func__, vp); 2115 #endif 2116 return 0; 2117 } 2118 2119 /* 2120 * If container object is not present, then there are no pages 2121 * to put; just return without error. This happens all the 2122 * time, apparently during discard of a closed vnode (which 2123 * trivially can't have dirty pages). 2124 */ 2125 cvp = cp->c_ovp; 2126 if (cvp == NULL) { 2127 rw_exit(vp->v_uobj.vmobjlock); 2128 return 0; 2129 } 2130 2131 /* Munge the arg structure to refer to the container vnode. */ 2132 KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock); 2133 ap->a_vp = cvp; 2134 2135 /* Finally, call putpages on it. */ 2136 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap); 2137 2138 return error; 2139 } 2140