xref: /csrg-svn/sys/ufs/lfs/lfs_inode.c (revision 39440)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)lfs_inode.c	7.16 (Berkeley) 10/29/89
18  */
19 
20 #include "param.h"
21 #include "systm.h"
22 #include "mount.h"
23 #include "user.h"
24 #include "file.h"
25 #include "buf.h"
26 #include "cmap.h"
27 #include "vnode.h"
28 #include "../ufs/inode.h"
29 #include "../ufs/fs.h"
30 #include "../ufs/ufsmount.h"
31 #ifdef QUOTA
32 #include "../ufs/quota.h"
33 #endif
34 #include "kernel.h"
35 #include "malloc.h"
36 
/*
 * Inode hash table size and hash function.  INOHSZ is 512; when it is
 * a power of two the hash reduces to a cheap mask, otherwise the
 * fallback uses an unsigned modulus.
 */
#define	INOHSZ	512
#if	((INOHSZ&(INOHSZ-1)) == 0)
#define	INOHASH(dev,ino)	(((dev)+(ino))&(INOHSZ-1))
#else
#define	INOHASH(dev,ino)	(((unsigned)((dev)+(ino)))%INOHSZ)
#endif

/*
 * Each hash bucket heads a circular doubly-linked list of in-core
 * inodes.  The union lets the bucket head masquerade as an inode:
 * ih_chain[0]/ih_chain[1] overlay the inode's i_forw/i_back links,
 * so insque()/remque() work uniformly on heads and elements.
 */
union ihead {
	union  ihead *ih_head[2];
	struct inode *ih_chain[2];
} ihead[INOHSZ];
48 
49 /*
50  * Initialize hash links for inodes.
51  */
52 ufs_init()
53 {
54 	register int i;
55 	register union ihead *ih = ihead;
56 
57 	if (VN_MAXPRIVATE < sizeof(struct inode))
58 		panic("ihinit: too small");
59 	for (i = INOHSZ; --i >= 0; ih++) {
60 		ih->ih_head[0] = ih;
61 		ih->ih_head[1] = ih;
62 	}
63 }
64 
/*
 * Look up a vnode/inode by device,inumber.
 * If it is in core (in the inode structure),
 * honor the locking protocol.
 * If it is not in core, read it in from the
 * specified device.
 * Callers must check for mount points!!
 * In all cases, a pointer to a locked
 * inode structure is returned via *ipp.  Returns 0 on success or an
 * errno from getnewvnode()/bread() (in which case *ipp is set to 0).
 */
iget(xp, ino, ipp)
	struct inode *xp;	/* any inode on the target filesystem; supplies dev and mount */
	ino_t ino;		/* inumber to look up */
	struct inode **ipp;	/* OUT: locked, referenced inode */
{
	dev_t dev = xp->i_dev;
	struct mount *mntp = ITOV(xp)->v_mount;
	register struct fs *fs = VFSTOUFS(mntp)->um_fs;
	extern struct vnodeops ufs_vnodeops, spec_inodeops;
	register struct inode *ip, *iq;
	register struct vnode *vp;
	struct vnode *nvp;
	struct buf *bp;
	struct dinode *dp;
	union  ihead *ih;
	int error;

	ih = &ihead[INOHASH(dev, ino)];
loop:
	/*
	 * First look for the inode on its hash chain.
	 */
	for (ip = ih->ih_chain[0]; ip != (struct inode *)ih; ip = ip->i_forw) {
		if (ino != ip->i_number || dev != ip->i_dev)
			continue;
		/*
		 * If it is locked, sleep until the holder releases it.
		 * The chain may have changed across the sleep, so restart
		 * the scan from the top.
		 */
		if ((ip->i_flag&ILOCKED) != 0) {
			ip->i_flag |= IWANT;
			sleep((caddr_t)ip, PINOD);
			goto loop;
		}
		/* vget() fails if the vnode is being reclaimed; rescan. */
		if (vget(ITOV(ip)))
			goto loop;
		*ipp = ip;
		return(0);
	}
	/*
	 * Allocate a new inode.
	 */
	if (error = getnewvnode(VT_UFS, mntp, &ufs_vnodeops, &nvp)) {
		*ipp = 0;
		return (error);
	}
	ip = VTOI(nvp);
	ip->i_vnode = nvp;
	ip->i_flag = 0;
	ip->i_devvp = 0;
	ip->i_lastr = 0;
	ip->i_mode = 0;
	ip->i_flags = 0;
#ifdef QUOTA
	ip->i_dquot = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip->i_dev = dev;
	ip->i_number = ino;
	insque(ip, ih);
	ILOCK(ip);
	/*
	 * Read in the disk contents for the inode.
	 */
	if (error = bread(VFSTOUFS(mntp)->um_devvp, fsbtodb(fs, itod(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * Unlock and discard unneeded inode.  i_mode is still 0
		 * here, so ufs_inactive() treats it as stale and vgone()s
		 * the vnode.
		 */
		iput(ip);
		brelse(bp);
		*ipp = 0;
		return (error);
	}
	/* Copy the on-disk inode out of its slot in the inode block. */
	dp = bp->b_un.b_dino;
	dp += itoo(fs, ino);
	ip->i_din = *dp;
	brelse(bp);
	/*
	 * Initialize the associated vnode
	 */
	vp = ITOV(ip);
	vp->v_type = IFTOVT(ip->i_mode);
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		/*
		 * Devices get the special-device vnode ops.  checkalias()
		 * reports any existing vnode for the same device, which we
		 * must adopt in place of the one just built.
		 */
		vp->v_rdev = ip->i_rdev;
		vp->v_op = &spec_inodeops;
		if (nvp = checkalias(vp, mntp)) {
			/*
			 * Reinitialize aliased inode.
			 */
			vp = nvp;
			iq = VTOI(vp);
			iq->i_vnode = vp;
			iq->i_lastr = 0;
			iq->i_flags = 0;
			ILOCK(iq);
			iq->i_din = ip->i_din;
			iq->i_dev = dev;
			iq->i_number = ino;
			insque(iq, ih);
			/*
			 * Discard unneeded vnode
			 * (i_mode = 0 marks it stale for ufs_inactive()).
			 */
			ip->i_mode = 0;
			iput(ip);
			ip = iq;
		}
	}
	if (ino == ROOTINO)
		vp->v_flag |= VROOT;
	/*
	 * Finish inode initialization.
	 */
	ip->i_fs = fs;
	ip->i_devvp = VFSTOUFS(mntp)->um_devvp;
	VREF(ip->i_devvp);
#ifdef QUOTA
	if (ip->i_mode != 0)
		ip->i_dquot = inoquota(ip);
#endif
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		if ((vp->v_mount->m_flag & M_RDONLY) == 0)
			ip->i_flag |= IMOD;
	}
	*ipp = ip;
	return (0);
}
207 
208 /*
209  * Unlock and decrement the reference count of an inode structure.
210  */
211 iput(ip)
212 	register struct inode *ip;
213 {
214 
215 	if ((ip->i_flag & ILOCKED) == 0)
216 		panic("iput");
217 	IUNLOCK(ip);
218 	vrele(ITOV(ip));
219 }
220 
/*
 * Last reference to an inode, write the inode out and if necessary,
 * truncate and deallocate the file.
 * Returns 0, or the error from itrunc() if deallocation fails.
 */
ufs_inactive(vp)
	struct vnode *vp;
{
	register struct inode *ip = VTOI(vp);
	int mode, error = 0;

	/* A nonzero reference count here indicates a bookkeeping bug. */
	if (vp->v_count != 0)
		printf("ufs_inactive: pushing active ino %d dev 0x%x\n",
			ip->i_number, ip->i_dev);
	/*
	 * Get rid of inodes related to stale file handles.
	 * (i_mode == 0 marks an inode that was never fully initialized
	 * or was already cleared below.)
	 */
	if (ip->i_mode == 0) {
		vgone(vp);
		return (0);
	}
	ILOCK(ip);
	/*
	 * Link count zero on a writable filesystem: the file is dead.
	 * Release its data blocks, clear the in-core copy, and free the
	 * on-disk inode.
	 */
	if (ip->i_nlink <= 0 && (vp->v_mount->m_flag & M_RDONLY) == 0) {
		error = itrunc(ip, (u_long)0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_rdev = 0;
		ip->i_flag |= IUPD|ICHG;
		ifree(ip, ip->i_number, mode);
#ifdef QUOTA
		(void) chkiq(ip->i_dev, ip, ip->i_uid, 0);
		dqrele(ip->i_dquot);
		ip->i_dquot = NODQUOT;
#endif
	}
	/* Push any pending access/update/change times to disk. */
	IUPDAT(ip, &time, &time, 0);
	IUNLOCK(ip);
	ip->i_flag = 0;
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (vp->v_count == 0 && ip->i_mode == 0)
		vgone(vp);
	return (error);
}
266 
267 /*
268  * Reclaim an inode so that it can be used for other purposes.
269  */
270 ufs_reclaim(vp)
271 	register struct vnode *vp;
272 {
273 	register struct inode *iq, *ip = VTOI(vp);
274 
275 	if (vp->v_count != 0)
276 		printf("ufs_reclaim: pushing active ino %d dev 0x%x\n",
277 			ip->i_number, ip->i_dev);
278 	/*
279 	 * Remove the inode from its hash chain.
280 	 */
281 	remque(ip);
282 	ip->i_forw = ip;
283 	ip->i_back = ip;
284 	/*
285 	 * Purge old data structures associated with the inode.
286 	 */
287 	cache_purge(vp);
288 	if (ip->i_devvp) {
289 		vrele(ip->i_devvp);
290 		ip->i_devvp = 0;
291 	}
292 #ifdef QUOTA
293 	dqrele(ip->i_dquot);
294 	ip->i_dquot = NODQUOT;
295 #endif
296 	ip->i_flag = 0;
297 	return (0);
298 }
299 
/*
 * Check accessed and update flags on an inode structure.
 * If any is on, update the inode with the current time.
 * If waitfor is given, then must ensure I/O order,
 * so wait for write to complete.
 * Returns 0 (including the no-op and read-only cases) or an error
 * from bread()/bwrite().
 */
iupdat(ip, ta, tm, waitfor)
	register struct inode *ip;
	struct timeval *ta, *tm;	/* access / modification times to apply */
	int waitfor;			/* nonzero: synchronous write */
{
	struct buf *bp;
	struct vnode *vp = ITOV(ip);
	struct dinode *dp;
	register struct fs *fs;
	int error;

	fs = ip->i_fs;
	/* Nothing to do unless some time/modification flag is pending. */
	if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
		return (0);
	/* Never write to a read-only filesystem. */
	if (vp->v_mount->m_flag & M_RDONLY)
		return (0);
	/* Read the filesystem block containing this on-disk inode. */
	error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* Fold the requested times into the in-core inode. */
	if (ip->i_flag&IACC)
		ip->i_atime = ta->tv_sec;
	if (ip->i_flag&IUPD)
		ip->i_mtime = tm->tv_sec;
	if (ip->i_flag&ICHG)
		ip->i_ctime = time.tv_sec;
	ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
	/* Copy the in-core inode into its slot and write the block out. */
	dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
	*dp = ip->i_din;
	if (waitfor) {
		return (bwrite(bp));
	} else {
		bdwrite(bp);
		return (0);
	}
}
344 
#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size.  Free affected disk
 * blocks -- the blocks of the file are removed in reverse order.
 * The on-disk inode is updated (with the pointers zeroed) BEFORE any
 * blocks are freed, so a crash mid-way loses only free blocks, never
 * live data.  Returns 0 or the first error encountered.
 *
 * NB: triple indirect blocks are untested.
 */
itrunc(oip, length)
	register struct inode *oip;
	u_long length;
{
	register daddr_t lastblock;
	daddr_t bn, lbn, lastiblock[NIADDR];
	register struct fs *fs;
	register struct inode *ip;
	struct buf *bp;
	int offset, osize, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int error, allerror = 0;
	struct inode tip;

	/* Growing or unchanged: just push the times out synchronously. */
	if (oip->i_size <= length) {
		oip->i_flag |= ICHG|IUPD;
		error = iupdat(oip, &time, &time, 1);
		return (error);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	fs = oip->i_fs;
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		error = balloc(oip, lbn, offset, &bn, B_CLRBUF);
		if (error)
			return (error);
		if ((long)bn < 0)
			panic("itrunc: hole");
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		count = howmany(size, CLBYTES);
		/* Flush any cached pages covering the partial block. */
		for (i = 0; i < count; i++)
			munhash(oip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
		error = bread(oip->i_devvp, bn, size, NOCRED, &bp);
		if (error) {
			oip->i_size = osize;
			brelse(bp);
			return (error);
		}
		/* Zero from the new EOF to the end of the block. */
		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
		bdwrite(bp);
	}
	/*
	 * Update file and block pointers
	 * on disk before we start freeing blocks.
	 * If we crash before free'ing blocks below,
	 * the blocks will be returned to the free list.
	 * lastiblock values are also normalized to -1
	 * for calls to indirtrunc below.
	 */
	tip = *oip;
	tip.i_size = osize;
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
	allerror = syncip(oip);

	/*
	 * Indirect blocks first.  Work from the stack copy (tip), which
	 * still holds the old block pointers that were zeroed above.
	 */
	ip = &tip;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = ip->i_ib[level];
		if (bn != 0) {
			error = indirtrunc(ip, bn, lastiblock[level], level,
				&count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				ip->i_ib[level] = 0;
				blkfree(ip, bn, (off_t)fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		/*
		 * A partially retained indirect level means nothing at
		 * lower levels is affected: skip the direct blocks.
		 */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register off_t bsize;

		bn = ip->i_db[i];
		if (bn == 0)
			continue;
		ip->i_db[i] = 0;
		bsize = (off_t)blksize(fs, ip, i);
		blkfree(ip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = ip->i_db[lastblock];
	if (bn != 0) {
		off_t oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			blkfree(ip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
/* BEGIN PARANOIA */
	/* The stack copy and the real inode must agree on block pointers. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (ip->i_ib[level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (ip->i_db[i] != oip->i_db[i])
			panic("itrunc2");
/* END PARANOIA */
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, 0);
#endif
	return (allerror);
}
523 
/*
 * Release blocks associated with the inode ip and
 * stored in the indirect block bn.  Blocks are free'd
 * in LIFO order up to (but not including) lastbn.  If
 * level is greater than SINGLE, the block is an indirect
 * block and recursive calls to indirtrunc must be used to
 * cleanse other indirect blocks.
 *
 * The number of disk blocks released is returned in *countp.
 * Returns 0 or the first I/O error encountered.
 *
 * NB: triple indirect blocks are untested.
 */
indirtrunc(ip, bn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t bn, lastbn;	/* this indirect block; last block to keep (-1: free all) */
	int level;		/* SINGLE, DOUBLE, or TRIPLE */
	long *countp;		/* OUT: disk blocks released */
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	daddr_t *copy, nb, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those
	 * entries corresponding to blocks to be free'd,
	 * and update on disk copy first.
	 */
	error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize,
		NOCRED, &bp);
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = bp->b_un.b_daddr;
	/*
	 * Work from a private copy of the pointers so the zeroed block
	 * can be written synchronously before the blocks it referenced
	 * are actually freed.
	 */
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1; i > last; i--) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = indirtrunc(ip, nb, (daddr_t)-1, level - 1,
				&blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
		blkfree(ip, nb, (off_t)fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 * (The loop above exits with i == last, so bap[i] below is the
	 * boundary entry that may be only partially freed.)
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = indirtrunc(ip, nb, last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
618 
/*
 * Remove any inodes in the inode cache belonging to dev.
 *
 * There should not be any active ones, return error if any are found
 * (nb: this is a user error, not a system err).
 * Under QUOTA, iq names an inode that is skipped during the sweep
 * (presumably the quota file's inode -- confirm against the caller).
 */
int busyprt = 0;	/* patch to print out busy inodes */

#ifdef QUOTA
iflush(mp, iq)
	struct mount *mp;
	struct inode *iq;
#else
iflush(mp)
	struct mount *mp;
#endif
{
	register struct vnode *vp, *nvp;
	register struct inode *ip;
	int busy = 0;

	/*
	 * Walk the mount's vnode list.  vgone() can unlink vp from the
	 * list, so the successor is fetched before anything else.
	 */
	for (vp = mp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		ip = VTOI(vp);
#ifdef QUOTA
		if (ip == iq)
			continue;
#endif
		if (vp->v_count) {
			busy++;
			if (!busyprt)
				continue;
			printf("%s %d on dev 0x%x count %d type %d\n",
			    "iflush: busy inode ", ip->i_number, ip->i_dev,
			    vp->v_count, vp->v_type);
			continue;
		}
		/*
		 * With v_count == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		vgone(vp);
	}
	if (busy)
		return (EBUSY);
	return (0);
}
666 
667 /*
668  * Lock an inode. If its already locked, set the WANT bit and sleep.
669  */
670 ilock(ip)
671 	register struct inode *ip;
672 {
673 
674 	while (ip->i_flag & ILOCKED) {
675 		ip->i_flag |= IWANT;
676 		(void) sleep((caddr_t)ip, PINOD);
677 	}
678 	ip->i_flag |= ILOCKED;
679 }
680 
681 /*
682  * Unlock an inode.  If WANT bit is on, wakeup.
683  */
684 iunlock(ip)
685 	register struct inode *ip;
686 {
687 
688 	if ((ip->i_flag & ILOCKED) == 0)
689 		printf("unlocking unlocked inode %d on dev 0x%x\n",
690 			ip->i_number, ip->i_dev);
691 	ip->i_flag &= ~ILOCKED;
692 	if (ip->i_flag&IWANT) {
693 		ip->i_flag &= ~IWANT;
694 		wakeup((caddr_t)ip);
695 	}
696 }
697 
698 /*
699  * Check mode permission on inode pointer. Mode is READ, WRITE or EXEC.
700  * The mode is shifted to select the owner/group/other fields. The
701  * super user is granted all permissions.
702  *
703  * NB: Called from vnode op table. It seems this could all be done
704  * using vattr's but...
705  */
706 iaccess(ip, mode, cred)
707 	register struct inode *ip;
708 	register int mode;
709 	struct ucred *cred;
710 {
711 	register gid_t *gp;
712 	int i;
713 
714 	/*
715 	 * If you're the super-user, you always get access.
716 	 */
717 	if (cred->cr_uid == 0)
718 		return (0);
719 	/*
720 	 * Access check is based on only one of owner, group, public.
721 	 * If not owner, then check group. If not a member of the
722 	 * group, then check public access.
723 	 */
724 	if (cred->cr_uid != ip->i_uid) {
725 		mode >>= 3;
726 		gp = cred->cr_groups;
727 		for (i = 0; i < cred->cr_ngroups; i++, gp++)
728 			if (ip->i_gid == *gp)
729 				goto found;
730 		mode >>= 3;
731 found:
732 		;
733 	}
734 	if ((ip->i_mode & mode) != 0)
735 		return (0);
736 	return (EACCES);
737 }
738