1 /* $NetBSD: coda_subr.c,v 1.14 2002/12/26 12:38:59 jdolecek Exp $ */ 2 3 /* 4 * 5 * Coda: an Experimental Distributed File System 6 * Release 3.1 7 * 8 * Copyright (c) 1987-1998 Carnegie Mellon University 9 * All Rights Reserved 10 * 11 * Permission to use, copy, modify and distribute this software and its 12 * documentation is hereby granted, provided that both the copyright 13 * notice and this permission notice appear in all copies of the 14 * software, derivative works or modified versions, and any portions 15 * thereof, and that both notices appear in supporting documentation, and 16 * that credit is given to Carnegie Mellon University in all documents 17 * and publicity pertaining to direct or indirect use of this code or its 18 * derivatives. 19 * 20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, 21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS 22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON 23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF 25 * ANY DERIVATIVE WORK. 26 * 27 * Carnegie Mellon encourages users of this software to return any 28 * improvements or extensions that they make, and to grant Carnegie 29 * Mellon the rights to redistribute these changes without encumbrance. 30 * 31 * @(#) coda/coda_subr.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $ 32 */ 33 34 /* 35 * Mach Operating System 36 * Copyright (c) 1989 Carnegie-Mellon University 37 * All rights reserved. The CMU software License Agreement specifies 38 * the terms and conditions for use and redistribution. 39 */ 40 41 /* 42 * This code was written for the Coda file system at Carnegie Mellon 43 * University. Contributers include David Steere, James Kistler, and 44 * M. Satyanarayanan. */ 45 46 /* NOTES: rvb 47 * 1. Added coda_unmounting to mark all cnodes as being UNMOUNTING. This has to 48 * be done before dounmount is called. Because some of the routines that 49 * dounmount calls before coda_unmounted might try to force flushes to venus. 50 * The vnode pager does this. 51 * 2. coda_unmounting marks all cnodes scanning coda_cache. 52 * 3. cfs_checkunmounting (under DEBUG) checks all cnodes by chasing the vnodes 53 * under the /coda mount point. 54 * 4. coda_cacheprint (under DEBUG) prints names with vnode/cnode address 55 */ 56 57 #include <sys/cdefs.h> 58 __KERNEL_RCSID(0, "$NetBSD: coda_subr.c,v 1.14 2002/12/26 12:38:59 jdolecek Exp $"); 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/malloc.h> 63 #include <sys/proc.h> 64 #include <sys/select.h> 65 #include <sys/mount.h> 66 67 #include <coda/coda.h> 68 #include <coda/cnode.h> 69 #include <coda/coda_subr.h> 70 #include <coda/coda_namecache.h> 71 72 int coda_active = 0; 73 int coda_reuse = 0; 74 int coda_new = 0; 75 76 struct cnode *coda_freelist = NULL; 77 struct cnode *coda_cache[CODA_CACHESIZE]; 78 79 #define coda_hash(fid) \ 80 (((fid)->Volume + (fid)->Vnode) & (CODA_CACHESIZE-1)) 81 82 #define CNODE_NEXT(cp) ((cp)->c_next) 83 84 #define ODD(vnode) ((vnode) & 0x1) 85 86 /* 87 * Allocate a cnode. 88 */ 89 struct cnode * 90 coda_alloc(void) 91 { 92 struct cnode *cp; 93 94 if (coda_freelist) { 95 cp = coda_freelist; 96 coda_freelist = CNODE_NEXT(cp); 97 coda_reuse++; 98 } 99 else { 100 CODA_ALLOC(cp, struct cnode *, sizeof(struct cnode)); 101 /* NetBSD vnodes don't have any Pager info in them ('cause there are 102 no external pagers, duh!) */ 103 #define VNODE_VM_INFO_INIT(vp) /* MT */ 104 VNODE_VM_INFO_INIT(CTOV(cp)); 105 coda_new++; 106 } 107 memset(cp, 0, sizeof (struct cnode)); 108 109 return(cp); 110 } 111 112 /* 113 * Deallocate a cnode. 114 */ 115 void 116 coda_free(cp) 117 struct cnode *cp; 118 { 119 120 CNODE_NEXT(cp) = coda_freelist; 121 coda_freelist = cp; 122 } 123 124 /* 125 * Put a cnode in the hash table 126 */ 127 void 128 coda_save(cp) 129 struct cnode *cp; 130 { 131 CNODE_NEXT(cp) = coda_cache[coda_hash(&cp->c_fid)]; 132 coda_cache[coda_hash(&cp->c_fid)] = cp; 133 } 134 135 /* 136 * Remove a cnode from the hash table 137 */ 138 void 139 coda_unsave(cp) 140 struct cnode *cp; 141 { 142 struct cnode *ptr; 143 struct cnode *ptrprev = NULL; 144 145 ptr = coda_cache[coda_hash(&cp->c_fid)]; 146 while (ptr != NULL) { 147 if (ptr == cp) { 148 if (ptrprev == NULL) { 149 coda_cache[coda_hash(&cp->c_fid)] 150 = CNODE_NEXT(ptr); 151 } else { 152 CNODE_NEXT(ptrprev) = CNODE_NEXT(ptr); 153 } 154 CNODE_NEXT(cp) = (struct cnode *)NULL; 155 156 return; 157 } 158 ptrprev = ptr; 159 ptr = CNODE_NEXT(ptr); 160 } 161 } 162 163 /* 164 * Lookup a cnode by fid. If the cnode is dying, it is bogus so skip it. 165 * NOTE: this allows multiple cnodes with same fid -- dcs 1/25/95 166 */ 167 struct cnode * 168 coda_find(fid) 169 ViceFid *fid; 170 { 171 struct cnode *cp; 172 173 cp = coda_cache[coda_hash(fid)]; 174 while (cp) { 175 if ((cp->c_fid.Vnode == fid->Vnode) && 176 (cp->c_fid.Volume == fid->Volume) && 177 (cp->c_fid.Unique == fid->Unique) && 178 (!IS_UNMOUNTING(cp))) 179 { 180 coda_active++; 181 return(cp); 182 } 183 cp = CNODE_NEXT(cp); 184 } 185 return(NULL); 186 } 187 188 /* 189 * coda_kill is called as a side effect to vcopen. To prevent any 190 * cnodes left around from an earlier run of a venus or warden from 191 * causing problems with the new instance, mark any outstanding cnodes 192 * as dying. Future operations on these cnodes should fail (excepting 193 * coda_inactive of course!). Since multiple venii/wardens can be 194 * running, only kill the cnodes for a particular entry in the 195 * coda_mnttbl. -- DCS 12/1/94 */ 196 197 int 198 coda_kill(whoIam, dcstat) 199 struct mount *whoIam; 200 enum dc_status dcstat; 201 { 202 int hash, count = 0; 203 struct cnode *cp; 204 205 /* 206 * Algorithm is as follows: 207 * Second, flush whatever vnodes we can from the name cache. 208 * 209 * Finally, step through whatever is left and mark them dying. 210 * This prevents any operation at all. 211 212 */ 213 214 /* This is slightly overkill, but should work. Eventually it'd be 215 * nice to only flush those entries from the namecache that 216 * reference a vnode in this vfs. */ 217 coda_nc_flush(dcstat); 218 219 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 220 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { 221 if (CTOV(cp)->v_mount == whoIam) { 222 #ifdef DEBUG 223 printf("coda_kill: vp %p, cp %p\n", CTOV(cp), cp); 224 #endif 225 count++; 226 CODADEBUG(CODA_FLUSH, 227 myprintf(("Live cnode fid %lx.%lx.%lx flags %d count %d\n", 228 (cp->c_fid).Volume, 229 (cp->c_fid).Vnode, 230 (cp->c_fid).Unique, 231 cp->c_flags, 232 CTOV(cp)->v_usecount)); ); 233 } 234 } 235 } 236 return count; 237 } 238 239 /* 240 * There are two reasons why a cnode may be in use, it may be in the 241 * name cache or it may be executing. 242 */ 243 void 244 coda_flush(dcstat) 245 enum dc_status dcstat; 246 { 247 int hash; 248 struct cnode *cp; 249 250 coda_clstat.ncalls++; 251 coda_clstat.reqs[CODA_FLUSH]++; 252 253 coda_nc_flush(dcstat); /* flush files from the name cache */ 254 255 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 256 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { 257 if (!ODD(cp->c_fid.Vnode)) /* only files can be executed */ 258 coda_vmflush(cp); 259 } 260 } 261 } 262 263 /* 264 * As a debugging measure, print out any cnodes that lived through a 265 * name cache flush. 266 */ 267 void 268 coda_testflush(void) 269 { 270 int hash; 271 struct cnode *cp; 272 273 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 274 for (cp = coda_cache[hash]; 275 cp != NULL; 276 cp = CNODE_NEXT(cp)) { 277 myprintf(("Live cnode fid %lx.%lx.%lx count %d\n", 278 (cp->c_fid).Volume,(cp->c_fid).Vnode, 279 (cp->c_fid).Unique, CTOV(cp)->v_usecount)); 280 } 281 } 282 } 283 284 /* 285 * First, step through all cnodes and mark them unmounting. 286 * NetBSD kernels may try to fsync them now that venus 287 * is dead, which would be a bad thing. 288 * 289 */ 290 void 291 coda_unmounting(whoIam) 292 struct mount *whoIam; 293 { 294 int hash; 295 struct cnode *cp; 296 297 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 298 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { 299 if (CTOV(cp)->v_mount == whoIam) { 300 if (cp->c_flags & (C_LOCKED|C_WANTED)) { 301 printf("coda_unmounting: Unlocking %p\n", cp); 302 cp->c_flags &= ~(C_LOCKED|C_WANTED); 303 wakeup((caddr_t) cp); 304 } 305 cp->c_flags |= C_UNMOUNTING; 306 } 307 } 308 } 309 } 310 311 #ifdef DEBUG 312 void 313 coda_checkunmounting(mp) 314 struct mount *mp; 315 { 316 struct vnode *vp, *nvp; 317 struct cnode *cp; 318 int count = 0, bad = 0; 319 loop: 320 for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { 321 if (vp->v_mount != mp) 322 goto loop; 323 nvp = vp->v_mntvnodes.le_next; 324 cp = VTOC(vp); 325 count++; 326 if (!(cp->c_flags & C_UNMOUNTING)) { 327 bad++; 328 printf("vp %p, cp %p missed\n", vp, cp); 329 cp->c_flags |= C_UNMOUNTING; 330 } 331 } 332 } 333 334 void 335 coda_cacheprint(whoIam) 336 struct mount *whoIam; 337 { 338 int hash; 339 struct cnode *cp; 340 int count = 0; 341 342 printf("coda_cacheprint: coda_ctlvp %p, cp %p", coda_ctlvp, VTOC(coda_ctlvp)); 343 coda_nc_name(VTOC(coda_ctlvp)); 344 printf("\n"); 345 346 for (hash = 0; hash < CODA_CACHESIZE; hash++) { 347 for (cp = coda_cache[hash]; cp != NULL; cp = CNODE_NEXT(cp)) { 348 if (CTOV(cp)->v_mount == whoIam) { 349 printf("coda_cacheprint: vp %p, cp %p", CTOV(cp), cp); 350 coda_nc_name(cp); 351 printf("\n"); 352 count++; 353 } 354 } 355 } 356 printf("coda_cacheprint: count %d\n", count); 357 } 358 #endif 359 360 /* 361 * There are 6 cases where invalidations occur. The semantics of each 362 * is listed here. 363 * 364 * CODA_FLUSH -- flush all entries from the name cache and the cnode cache. 365 * CODA_PURGEUSER -- flush all entries from the name cache for a specific user 366 * This call is a result of token expiration. 367 * 368 * The next two are the result of callbacks on a file or directory. 369 * CODA_ZAPDIR -- flush the attributes for the dir from its cnode. 370 * Zap all children of this directory from the namecache. 371 * CODA_ZAPFILE -- flush the attributes for a file. 372 * 373 * The fifth is a result of Venus detecting an inconsistent file. 374 * CODA_PURGEFID -- flush the attribute for the file 375 * If it is a dir (odd vnode), purge its 376 * children from the namecache 377 * remove the file from the namecache. 378 * 379 * The sixth allows Venus to replace local fids with global ones 380 * during reintegration. 381 * 382 * CODA_REPLACE -- replace one ViceFid with another throughout the name cache 383 */ 384 385 int handleDownCall(opcode, out) 386 int opcode; union outputArgs *out; 387 { 388 int error; 389 390 /* Handle invalidate requests. */ 391 switch (opcode) { 392 case CODA_FLUSH : { 393 394 coda_flush(IS_DOWNCALL); 395 396 CODADEBUG(CODA_FLUSH,coda_testflush();) /* print remaining cnodes */ 397 return(0); 398 } 399 400 case CODA_PURGEUSER : { 401 coda_clstat.ncalls++; 402 coda_clstat.reqs[CODA_PURGEUSER]++; 403 404 /* XXX - need to prevent fsync's */ 405 coda_nc_purge_user(out->coda_purgeuser.cred.cr_uid, IS_DOWNCALL); 406 return(0); 407 } 408 409 case CODA_ZAPFILE : { 410 struct cnode *cp; 411 412 error = 0; 413 coda_clstat.ncalls++; 414 coda_clstat.reqs[CODA_ZAPFILE]++; 415 416 cp = coda_find(&out->coda_zapfile.CodaFid); 417 if (cp != NULL) { 418 vref(CTOV(cp)); 419 420 cp->c_flags &= ~C_VATTR; 421 if (CTOV(cp)->v_flag & VTEXT) 422 error = coda_vmflush(cp); 423 CODADEBUG(CODA_ZAPFILE, myprintf(( 424 "zapfile: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n", 425 cp->c_fid.Volume, 426 cp->c_fid.Vnode, 427 cp->c_fid.Unique, 428 CTOV(cp)->v_usecount - 1, error));); 429 if (CTOV(cp)->v_usecount == 1) { 430 cp->c_flags |= C_PURGING; 431 } 432 vrele(CTOV(cp)); 433 } 434 435 return(error); 436 } 437 438 case CODA_ZAPDIR : { 439 struct cnode *cp; 440 441 coda_clstat.ncalls++; 442 coda_clstat.reqs[CODA_ZAPDIR]++; 443 444 cp = coda_find(&out->coda_zapdir.CodaFid); 445 if (cp != NULL) { 446 vref(CTOV(cp)); 447 448 cp->c_flags &= ~C_VATTR; 449 coda_nc_zapParentfid(&out->coda_zapdir.CodaFid, IS_DOWNCALL); 450 451 CODADEBUG(CODA_ZAPDIR, myprintf(( 452 "zapdir: fid = (%lx.%lx.%lx), refcnt = %d\n", 453 cp->c_fid.Volume, 454 cp->c_fid.Vnode, 455 cp->c_fid.Unique, 456 CTOV(cp)->v_usecount - 1));); 457 if (CTOV(cp)->v_usecount == 1) { 458 cp->c_flags |= C_PURGING; 459 } 460 vrele(CTOV(cp)); 461 } 462 463 return(0); 464 } 465 466 case CODA_PURGEFID : { 467 struct cnode *cp; 468 469 error = 0; 470 coda_clstat.ncalls++; 471 coda_clstat.reqs[CODA_PURGEFID]++; 472 473 cp = coda_find(&out->coda_purgefid.CodaFid); 474 if (cp != NULL) { 475 vref(CTOV(cp)); 476 if (ODD(out->coda_purgefid.CodaFid.Vnode)) { /* Vnode is a directory */ 477 coda_nc_zapParentfid(&out->coda_purgefid.CodaFid, 478 IS_DOWNCALL); 479 } 480 cp->c_flags &= ~C_VATTR; 481 coda_nc_zapfid(&out->coda_purgefid.CodaFid, IS_DOWNCALL); 482 if (!(ODD(out->coda_purgefid.CodaFid.Vnode)) 483 && (CTOV(cp)->v_flag & VTEXT)) { 484 485 error = coda_vmflush(cp); 486 } 487 CODADEBUG(CODA_PURGEFID, myprintf(("purgefid: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n", 488 cp->c_fid.Volume, cp->c_fid.Vnode, 489 cp->c_fid.Unique, 490 CTOV(cp)->v_usecount - 1, error));); 491 if (CTOV(cp)->v_usecount == 1) { 492 cp->c_flags |= C_PURGING; 493 } 494 vrele(CTOV(cp)); 495 } 496 return(error); 497 } 498 499 case CODA_REPLACE : { 500 struct cnode *cp = NULL; 501 502 coda_clstat.ncalls++; 503 coda_clstat.reqs[CODA_REPLACE]++; 504 505 cp = coda_find(&out->coda_replace.OldFid); 506 if (cp != NULL) { 507 /* remove the cnode from the hash table, replace the fid, and reinsert */ 508 vref(CTOV(cp)); 509 coda_unsave(cp); 510 cp->c_fid = out->coda_replace.NewFid; 511 coda_save(cp); 512 513 CODADEBUG(CODA_REPLACE, myprintf(("replace: oldfid = (%lx.%lx.%lx), newfid = (%lx.%lx.%lx), cp = %p\n", 514 out->coda_replace.OldFid.Volume, 515 out->coda_replace.OldFid.Vnode, 516 out->coda_replace.OldFid.Unique, 517 cp->c_fid.Volume, cp->c_fid.Vnode, 518 cp->c_fid.Unique, cp));) 519 vrele(CTOV(cp)); 520 } 521 return (0); 522 } 523 default: 524 myprintf(("handleDownCall: unknown opcode %d\n", opcode)); 525 return (EINVAL); 526 } 527 } 528 529 /* coda_grab_vnode: lives in either cfs_mach.c or cfs_nbsd.c */ 530 531 int 532 coda_vmflush(cp) 533 struct cnode *cp; 534 { 535 return 0; 536 } 537 538 539 /* 540 * kernel-internal debugging switches 541 */ 542 543 void coda_debugon(void) 544 { 545 codadebug = -1; 546 coda_nc_debug = -1; 547 coda_vnop_print_entry = 1; 548 coda_psdev_print_entry = 1; 549 coda_vfsop_print_entry = 1; 550 } 551 552 void coda_debugoff(void) 553 { 554 codadebug = 0; 555 coda_nc_debug = 0; 556 coda_vnop_print_entry = 0; 557 coda_psdev_print_entry = 0; 558 coda_vfsop_print_entry = 0; 559 } 560 561 /* 562 * Utilities used by both client and server 563 * Standard levels: 564 * 0) no debugging 565 * 1) hard failures 566 * 2) soft failures 567 * 3) current test software 568 * 4) main procedure entry points 569 * 5) main procedure exit points 570 * 6) utility procedure entry points 571 * 7) utility procedure exit points 572 * 8) obscure procedure entry points 573 * 9) obscure procedure exit points 574 * 10) random stuff 575 * 11) all <= 1 576 * 12) all <= 2 577 * 13) all <= 3 578 * ... 579 */ 580