/*	$NetBSD: coda_subr.c,v 1.33 2024/05/17 23:57:46 thorpej Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_subr.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */
45
46 /* NOTES: rvb
47 * 1. Added coda_unmounting to mark all cnodes as being UNMOUNTING. This has to
48 * be done before dounmount is called. Because some of the routines that
49 * dounmount calls before coda_unmounted might try to force flushes to venus.
50 * The vnode pager does this.
51 * 2. coda_unmounting marks all cnodes scanning coda_cache.
52 * 3. cfs_checkunmounting (under DEBUG) checks all cnodes by chasing the vnodes
53 * under the /coda mount point.
54 * 4. coda_cacheprint (under DEBUG) prints names with vnode/cnode address
55 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_subr.c,v 1.33 2024/05/17 23:57:46 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_subr.h>
#include <coda/coda_namecache.h>

int codadebug = 0;
int coda_printf_delay = 0;	/* in microseconds */
int coda_vnop_print_entry = 0;
int coda_vfsop_print_entry = 0;

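/*
 * In Coda, directories have odd vnode numbers (see the CODA_PURGEFID
 * comment below), so IS_DIR() simply tests the low bit of the fid.
 */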
#ifdef CODA_COMPAT_5
#define coda_hash(fid) \
    (((fid)->Volume + (fid)->Vnode) & (CODA_CACHESIZE-1))
#define IS_DIR(cnode)		(cnode.Vnode & 0x1)
#else
#define coda_hash(fid) \
    (coda_f2i(fid) & (CODA_CACHESIZE-1))
#define IS_DIR(cnode)		(cnode.opaque[2] & 0x1)
#endif

struct vnode *coda_ctlvp;

/*
 * Lookup a cnode by fid.  If the cnode is dying, it is bogus so skip it.
 * The cnode is returned locked with the vnode referenced.
 */
struct cnode *
coda_find(CodaFid *fid)
{
        int i;
        struct vnode *vp;
        struct cnode *cp;

        for (i = 0; i < NVCODA; i++) {
                if (!coda_mnttbl[i].mi_started)
                        continue;
                if (vcache_get(coda_mnttbl[i].mi_vfsp,
                    fid, sizeof(CodaFid), &vp) != 0)
                        continue;
                mutex_enter(vp->v_interlock);
                cp = VTOC(vp);
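                /* Skip nodes that are being reclaimed or already unmounting. */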
                if (vp->v_type == VNON || cp == NULL || IS_UNMOUNTING(cp)) {
                        mutex_exit(vp->v_interlock);
                        vrele(vp);
                        continue;
                }
                mutex_enter(&cp->c_lock);
                mutex_exit(vp->v_interlock);

                return cp;
        }

        return NULL;
}

/*
 * Iterate over all nodes attached to coda mounts.
 */
static void
coda_iterate(bool (*f)(void *, struct vnode *), void *cl)
{
        int i;
        struct vnode_iterator *marker;
        struct vnode *vp;

        for (i = 0; i < NVCODA; i++) {
                if (coda_mnttbl[i].mi_vfsp == NULL)
                        continue;
                vfs_vnode_iterator_init(coda_mnttbl[i].mi_vfsp, &marker);
                while ((vp = vfs_vnode_iterator_next(marker, f, cl)) != NULL)
                        vrele(vp);
                vfs_vnode_iterator_destroy(marker);
        }
}

/*
 * coda_kill is called as a side effect to vcopen.  To prevent any
 * cnodes left around from an earlier run of a venus or warden from
 * causing problems with the new instance, mark any outstanding cnodes
 * as dying.  Future operations on these cnodes should fail (excepting
 * coda_inactive of course!).  Since multiple venii/wardens can be
 * running, only kill the cnodes for a particular entry in the
 * coda_mnttbl. -- DCS 12/1/94
 */

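/*
 * Selector for coda_kill(): count the vnodes still attached to this mount.
 * Returning false keeps the iterator from handing any of them back to us.
 */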
static bool
coda_kill_selector(void *cl, struct vnode *vp)
{
        int *count = cl;

        (*count)++;

        return false;
}

int
coda_kill(struct mount *whoIam, enum dc_status dcstat)
{
        int count = 0;
        struct vnode_iterator *marker;

        /*
         * Flush whatever entries we can from the name cache.  This is
         * slightly overkill, but should work.  Eventually it would be
         * nice to flush only those entries that reference a vnode in
         * this vfs.
         */
        coda_nc_flush(dcstat);

        vfs_vnode_iterator_init(whoIam, &marker);
        vfs_vnode_iterator_next(marker, coda_kill_selector, &count);
        vfs_vnode_iterator_destroy(marker);

        return count;
}

/*
 * There are two reasons why a cnode may be in use: it may be in the
 * name cache, or it may be executing.
 */
static bool
coda_flush_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp != NULL && !IS_DIR(cp->c_fid)) /* only files can be executed */
                coda_vmflush(cp);

        return false;
}

void
coda_flush(enum dc_status dcstat)
{

        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_FLUSH]++;

        coda_nc_flush(dcstat); /* flush files from the name cache */

        coda_iterate(coda_flush_selector, NULL);
}

/*
 * As a debugging measure, print out any cnodes that lived through a
 * name cache flush.
 */
static bool
coda_testflush_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp != NULL)
                myprintf(("Live cnode fid %s count %d\n",
                    coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp))));

        return false;
}

void
coda_testflush(void)
{

        coda_iterate(coda_testflush_selector, NULL);
}

/*
 * Step through all cnodes and mark them unmounting.  Otherwise NetBSD
 * kernels may try to fsync them now that venus is dead, which would be
 * a bad thing.
 */
static bool
coda_unmounting_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp)
                cp->c_flags |= C_UNMOUNTING;

        return false;
}

void
coda_unmounting(struct mount *whoIam)
{
        struct vnode_iterator *marker;

        vfs_vnode_iterator_init(whoIam, &marker);
        vfs_vnode_iterator_next(marker, coda_unmounting_selector, NULL);
        vfs_vnode_iterator_destroy(marker);
}

#ifdef DEBUG
static bool
coda_checkunmounting_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp && !(cp->c_flags & C_UNMOUNTING)) {
                printf("vp %p, cp %p missed\n", vp, cp);
                cp->c_flags |= C_UNMOUNTING;
        }

        return false;
}
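
/*
 * Debugging check: verify that coda_unmounting() marked every cnode as
 * C_UNMOUNTING, and report (and mark) any that it missed.
 */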
void
coda_checkunmounting(struct mount *mp)
{
        struct vnode_iterator *marker;

        vfs_vnode_iterator_init(mp, &marker);
        vfs_vnode_iterator_next(marker, coda_checkunmounting_selector, NULL);
        vfs_vnode_iterator_destroy(marker);
}

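/*
 * Debugging aid: print the vnode/cnode pointers and any cached name for
 * the control vnode and for every vnode under this mount.
 */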
void
coda_cacheprint(struct mount *whoIam)
{
        struct vnode *vp;
        struct vnode_iterator *marker;
        int count = 0;

        printf("coda_cacheprint: coda_ctlvp %p, cp %p", coda_ctlvp, VTOC(coda_ctlvp));
        coda_nc_name(VTOC(coda_ctlvp));
        printf("\n");

        vfs_vnode_iterator_init(whoIam, &marker);
        while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL)) != NULL) {
                printf("coda_cacheprint: vp %p, cp %p", vp, VTOC(vp));
                coda_nc_name(VTOC(vp));
                printf("\n");
                count++;
                vrele(vp);
        }
        printf("coda_cacheprint: count %d\n", count);
        vfs_vnode_iterator_destroy(marker);
}
#endif

/*
 * There are six cases where invalidations occur.  The semantics of each
 * are listed here.
 *
 * CODA_FLUSH     -- flush all entries from the name cache and the cnode cache.
 * CODA_PURGEUSER -- flush all entries from the name cache for a specific user.
 *                   This call is a result of token expiration.
 *
 * The next two are the result of callbacks on a file or directory.
 * CODA_ZAPDIR    -- flush the attributes for the dir from its cnode and
 *                   zap all children of this directory from the namecache.
 * CODA_ZAPFILE   -- flush the attributes for a file.
 *
 * The fifth is a result of Venus detecting an inconsistent file.
 * CODA_PURGEFID  -- flush the attributes for the file.  If it is a dir
 *                   (odd vnode), purge its children from the namecache
 *                   and remove the file itself from the namecache.
 *
 * The sixth allows Venus to replace local fids with global ones
 * during reintegration.
 *
 * CODA_REPLACE   -- replace one CodaFid with another throughout the name cache.
 */

int handleDownCall(int opcode, union outputArgs *out)
{
        int error;

        /* Handle invalidate requests. */
        switch (opcode) {
        case CODA_FLUSH : {

                coda_flush(IS_DOWNCALL);

                CODADEBUG(CODA_FLUSH, coda_testflush();) /* print remaining cnodes */
                return(0);
        }

        case CODA_PURGEUSER : {
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_PURGEUSER]++;

                /* XXX - need to prevent fsync's */
#ifdef CODA_COMPAT_5
                coda_nc_purge_user(out->coda_purgeuser.cred.cr_uid, IS_DOWNCALL);
#else
                coda_nc_purge_user(out->coda_purgeuser.uid, IS_DOWNCALL);
#endif
                return(0);
        }

        case CODA_ZAPFILE : {
                struct cnode *cp;

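                /*
                 * Invalidate the file's cached attributes; if it is mapped
                 * for execution, flush its text pages as well.
                 */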
                error = 0;
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_ZAPFILE]++;

                cp = coda_find(&out->coda_zapfile.Fid);
                if (cp != NULL) {
                        cp->c_flags &= ~C_VATTR;
                        if (CTOV(cp)->v_iflag & VI_TEXT)
                                error = coda_vmflush(cp);
                        CODADEBUG(CODA_ZAPFILE, myprintf((
                            "zapfile: fid = %s, refcnt = %d, error = %d\n",
                            coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)) - 1, error)););
                        if (vrefcnt(CTOV(cp)) == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }

                return(error);
        }

        case CODA_ZAPDIR : {
                struct cnode *cp;

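                /*
                 * Invalidate the directory's cached attributes and purge
                 * all of its children from the name cache.
                 */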
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_ZAPDIR]++;

                cp = coda_find(&out->coda_zapdir.Fid);
                if (cp != NULL) {
                        cp->c_flags &= ~C_VATTR;
                        coda_nc_zapParentfid(&out->coda_zapdir.Fid, IS_DOWNCALL);

                        CODADEBUG(CODA_ZAPDIR, myprintf((
                            "zapdir: fid = %s, refcnt = %d\n",
                            coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)) - 1)););
                        if (vrefcnt(CTOV(cp)) == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }

                return(0);
        }

        case CODA_PURGEFID : {
                struct cnode *cp;

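                /*
                 * Drop the cached attributes and the name cache entry for
                 * this fid.  For a directory (odd vnode number), also purge
                 * its children; for an executing file, flush its text pages.
                 */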
                error = 0;
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_PURGEFID]++;

                cp = coda_find(&out->coda_purgefid.Fid);
                if (cp != NULL) {
                        if (IS_DIR(out->coda_purgefid.Fid)) { /* Vnode is a directory */
                                coda_nc_zapParentfid(&out->coda_purgefid.Fid,
                                    IS_DOWNCALL);
                        }
                        cp->c_flags &= ~C_VATTR;
                        coda_nc_zapfid(&out->coda_purgefid.Fid, IS_DOWNCALL);
                        if (!(IS_DIR(out->coda_purgefid.Fid))
                            && (CTOV(cp)->v_iflag & VI_TEXT)) {
                                error = coda_vmflush(cp);
                        }
                        CODADEBUG(CODA_PURGEFID, myprintf((
                            "purgefid: fid = %s, refcnt = %d, error = %d\n",
                            coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)) - 1, error)););
                        if (vrefcnt(CTOV(cp)) == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }
                return(error);
        }

        case CODA_REPLACE : {
                struct cnode *cp = NULL;

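                /*
                 * Re-key the vnode in the vnode cache so that future
                 * vcache_get() lookups find it under the new fid.
                 */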
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_REPLACE]++;

                cp = coda_find(&out->coda_replace.OldFid);
                if (cp != NULL) {
                        error = vcache_rekey_enter(CTOV(cp)->v_mount, CTOV(cp),
                            &out->coda_replace.OldFid, sizeof(CodaFid),
                            &out->coda_replace.NewFid, sizeof(CodaFid));
                        if (error) {
                                mutex_exit(&cp->c_lock);
                                vrele(CTOV(cp));
                                return error;
                        }
                        cp->c_fid = out->coda_replace.NewFid;
                        vcache_rekey_exit(CTOV(cp)->v_mount, CTOV(cp),
                            &out->coda_replace.OldFid, sizeof(CodaFid),
                            &cp->c_fid, sizeof(CodaFid));

                        CODADEBUG(CODA_REPLACE, myprintf((
                            "replace: oldfid = %s, newfid = %s, cp = %p\n",
                            coda_f2s(&out->coda_replace.OldFid),
                            coda_f2s(&cp->c_fid), cp));)
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }
                return (0);
        }
        default:
                myprintf(("handleDownCall: unknown opcode %d\n", opcode));
                return (EINVAL);
        }
}

/* coda_grab_vnode: lives in either cfs_mach.c or cfs_nbsd.c */

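/*
 * coda_vmflush is currently a no-op here: it always reports success
 * without flushing anything.
 */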
int
coda_vmflush(struct cnode *cp)
{
        return 0;
}

/*
 * kernel-internal debugging switches
 */

void coda_debugon(void)
{
        codadebug = -1;
        coda_nc_debug = -1;
        coda_vnop_print_entry = 1;
        coda_psdev_print_entry = 1;
        coda_vfsop_print_entry = 1;
}

void coda_debugoff(void)
{
        codadebug = 0;
        coda_nc_debug = 0;
        coda_vnop_print_entry = 0;
        coda_psdev_print_entry = 0;
        coda_vfsop_print_entry = 0;
}

/* How to print a ucred */
void
coda_print_cred(kauth_cred_t cred)
{
        uint16_t ngroups;
        int i;

        myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
            kauth_cred_geteuid(cred)));

        ngroups = kauth_cred_ngroups(cred);
        for (i = 0; i < ngroups; i++)
                myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
        myprintf(("\n"));
}

/*
 * Utilities used by both client and server
 * Standard levels:
 *	 0) no debugging
 *	 1) hard failures
 *	 2) soft failures
 *	 3) current test software
 *	 4) main procedure entry points
 *	 5) main procedure exit points
 *	 6) utility procedure entry points
 *	 7) utility procedure exit points
 *	 8) obscure procedure entry points
 *	 9) obscure procedure exit points
 *	10) random stuff
 *	11) all <= 1
 *	12) all <= 2
 *	13) all <= 3
 *	...
 */