/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * The idea behind composition-based stacked filesystems is to add a
 * vnode to the stack of vnodes for each mount. These vnodes have their
 * own set of mount options and filesystem-specific functions, so they
 * can modify data or operations before they are passed along. Such a
 * filesystem must maintain a mapping from the underlying vnodes to its
 * interposing vnodes.
 *
 * In lofs, this mapping is implemented by a hashtable. Each bucket
 * contains a count of the number of nodes currently contained, the
 * chain of vnodes, and a lock to protect the list of vnodes. The
 * hashtable dynamically grows if the number of vnodes in the table as a
 * whole exceeds the size of the table left-shifted by
 * lo_resize_threshold. In order to minimize lock contention, there is
 * no global lock protecting the hashtable, hence obtaining the
 * per-bucket locks consists of a dance to make sure we've actually
 * locked the correct bucket. Acquiring a bucket lock doesn't involve
 * locking the hashtable itself, so we refrain from freeing old
 * hashtables, and store them in a linked list of retired hashtables;
 * the list is freed when the filesystem is unmounted.
 */
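
/*
 * A rough sketch of the mapping (illustrative, using the lnode and
 * lobucket field names that appear throughout this file):
 *
 *	li_hashtable[hash] -> lobucket { lh_lock, lh_count, lh_chain }
 *	lh_chain -> lnode { lo_vp    (held underlying vnode),
 *	                    lo_vnode (interposing shadow vnode),
 *	                    lo_next  (hash chain link) } -> ...
 */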

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/t_lock.h>
#include <sys/debug.h>
#include <sys/atomic.h>

#include <sys/fs/lofs_node.h>
#include <sys/fs/lofs_info.h>
/*
 * Due to the hashing algorithm, the size of the hash table needs to be a
 * power of 2.
 */
#define LOFS_DEFAULT_HTSIZE	(1 << 6)

#define ltablehash(vp, tblsz)	((((intptr_t)(vp))>>10) & ((tblsz)-1))
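
/*
 * Illustrative example: with the default table size of 64, the mask
 * (tblsz - 1) is 0x3f, so ltablehash() keeps only bits 10..15 of the
 * vnode address. The masking trick requires tblsz to be a power of 2;
 * otherwise (tblsz - 1) is not a contiguous bit mask and some buckets
 * could never be selected. The >> 10 presumably discards the low-order
 * address bits, which carry little entropy for fixed-size kernel
 * allocations.
 */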

/*
 * The following macros can only be safely used when the desired bucket
 * is already locked.
 */
/*
 * The lock in the hashtable associated with the given vnode.
 */
#define TABLE_LOCK(vp, li) \
	(&(li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_lock)

/*
 * The bucket in the hashtable that the given vnode hashes to.
 */
#define TABLE_BUCKET(vp, li) \
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_chain)

/*
 * Number of elements currently in the bucket that the vnode hashes to.
 */
#define TABLE_COUNT(vp, li) \
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_count)

/*
 * Grab/Drop the lock for the bucket this vnode hashes to.
 */
#define TABLE_LOCK_ENTER(vp, li)	table_lock_enter(vp, li)
#define TABLE_LOCK_EXIT(vp, li) \
	mutex_exit(&(li)->li_hashtable[ltablehash((vp), \
	    (li)->li_htsize)].lh_lock)
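
/*
 * Typical usage (an illustrative sketch, not a quote of any caller):
 * take the bucket lock before touching the chain and hold it across
 * every use of TABLE_BUCKET() and TABLE_COUNT():
 *
 *	TABLE_LOCK_ENTER(vp, li);
 *	lp = lfind(vp, li);	(lfind() walks TABLE_BUCKET(vp, li))
 *	TABLE_LOCK_EXIT(vp, li);
 */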

static lnode_t *lfind(struct vnode *, struct loinfo *);
static void lsave(lnode_t *, struct loinfo *);
static struct vfs *makelfsnode(struct vfs *, struct loinfo *);
static struct lfsnode *lfsfind(struct vfs *, struct loinfo *);

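/*
 * Resize tunables: lsave() calls lgrow() once the number of lnodes
 * exceeds li_htsize << lo_resize_threshold, and the new table size is
 * li_htsize << lo_resize_factor.
 */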
uint_t lo_resize_threshold = 1;
uint_t lo_resize_factor = 2;

static kmem_cache_t *lnode_cache;

/*
 * Since the hashtable itself isn't protected by a lock, obtaining a
 * per-bucket lock proceeds as follows:
 *
 * (a) li->li_htlock protects li->li_hashtable, li->li_htsize, and
 * li->li_retired.
 *
 * (b) Per-bucket locks (lh_lock) protect the contents of the bucket.
 *
 * (c) Locking order for resizing the hashtable is li_htlock then
 * lh_lock.
 *
 * To grab the bucket lock we:
 *
 * (1) Stash away the htsize and the pointer to the hashtable to make
 * sure neither changes while we're using them.
 *
 * (2) lgrow() updates the pointer to the hashtable before it updates
 * the size: the worst case scenario is that we have the wrong size (but
 * the correct table), so we hash to the wrong bucket, grab the wrong
 * lock, and then realize that things have changed, rewind and start
 * again. If both the size and the table changed since we loaded them,
 * we'll realize that too and restart.
 *
 * (3) The protocol for growing the hashtable involves holding *all* the
 * locks in the table, hence the unlocking code (TABLE_LOCK_EXIT())
 * doesn't need to do any dances, since neither the table nor the size
 * can change while any bucket lock is held.
 *
 * (4) If the hashtable is growing (by thread t1) while another thread
 * (t2) is trying to grab a bucket lock, t2 might have a stale reference
 * to li->li_htsize:
 *
 * - t1 grabs all locks in lgrow()
 * - t2 loads li->li_htsize and li->li_hashtable
 * - t1 changes li->li_hashtable
 * - t2 loads from an offset in the "stale" hashtable and tries to grab
 * the relevant mutex.
 *
 * If t1 had free'd the stale hashtable, t2 would be in trouble. Hence,
 * stale hashtables are not freed but stored in a list of "retired"
 * hashtables, which is emptied when the filesystem is unmounted.
 */
static void
table_lock_enter(vnode_t *vp, struct loinfo *li)
{
	struct lobucket *chain;
	uint_t htsize;
	uint_t hash;

	for (;;) {
		htsize = li->li_htsize;
		membar_consumer();
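		/*
		 * This consumer barrier pairs with the producer
		 * barriers in lgrow(), which publishes the new
		 * li_hashtable before the new li_htsize. Loading
		 * li_htsize first therefore guarantees that the
		 * li_hashtable we load below is at least as new as the
		 * size we just read, so we can never index a smaller,
		 * stale table with a larger size.
		 */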
		chain = (struct lobucket *)li->li_hashtable;
		hash = ltablehash(vp, htsize);
		mutex_enter(&chain[hash].lh_lock);
		if (li->li_hashtable == chain && li->li_htsize == htsize)
			break;
		mutex_exit(&chain[hash].lh_lock);
	}
}

void
lofs_subrinit(void)
{
	/*
	 * Initialize the cache.
	 */
	lnode_cache = kmem_cache_create("lnode_cache", sizeof (lnode_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lofs_subrfini(void)
{
	kmem_cache_destroy(lnode_cache);
}

/*
 * Initialize a (struct loinfo), and initialize the hashtable to have
 * htsize buckets.
 */
void
lsetup(struct loinfo *li, uint_t htsize)
{
	li->li_refct = 0;
	li->li_lfs = NULL;
	if (htsize == 0)
		htsize = LOFS_DEFAULT_HTSIZE;
	li->li_htsize = htsize;
	li->li_hashtable = kmem_zalloc(htsize * sizeof (*li->li_hashtable),
	    KM_SLEEP);
	mutex_init(&li->li_lfslock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&li->li_htlock, NULL, MUTEX_DEFAULT, NULL);
	li->li_retired = NULL;
}

/*
 * Destroy a (struct loinfo)
 */
void
ldestroy(struct loinfo *li)
{
	uint_t i, htsize;
	struct lobucket *table;
	struct lo_retired_ht *lrhp, *trhp;

	mutex_destroy(&li->li_htlock);
	mutex_destroy(&li->li_lfslock);
	htsize = li->li_htsize;
	table = li->li_hashtable;
	for (i = 0; i < htsize; i++)
		mutex_destroy(&table[i].lh_lock);
	kmem_free(table, htsize * sizeof (*li->li_hashtable));

	/*
	 * Free the retired hashtables.
	 */
	lrhp = li->li_retired;
	while (lrhp != NULL) {
		trhp = lrhp;
		lrhp = lrhp->lrh_next;
		kmem_free(trhp->lrh_table,
		    trhp->lrh_size * sizeof (*li->li_hashtable));
		kmem_free(trhp, sizeof (*trhp));
	}
	li->li_retired = NULL;
}

/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode. If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind). The lnode will be flushed from the
 * table when lo_inactive calls freelonode. The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table. This is used in the creation
 * of a terminating lnode when looping is detected. A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
	lnode_t *lp, *tlp;
	struct vfs *vfsp;
	vnode_t *nvp;

	lp = NULL;
	TABLE_LOCK_ENTER(vp, li);
	if (flag != LOF_FORCE)
		lp = lfind(vp, li);
	if ((flag == LOF_FORCE) || (lp == NULL)) {
		/*
		 * Optimistically assume that we won't need to sleep.
		 */
		lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
		nvp = vn_alloc(KM_NOSLEEP);
		if (lp == NULL || nvp == NULL) {
			TABLE_LOCK_EXIT(vp, li);
			/* The lnode allocation may have succeeded, save it */
			tlp = lp;
			if (tlp == NULL) {
				tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
			}
			if (nvp == NULL) {
				nvp = vn_alloc(KM_SLEEP);
			}
			lp = NULL;
			TABLE_LOCK_ENTER(vp, li);
			if (flag != LOF_FORCE)
				lp = lfind(vp, li);
			if (lp != NULL) {
				kmem_cache_free(lnode_cache, tlp);
				vn_free(nvp);
				VN_RELE(vp);
				goto found_lnode;
			}
			lp = tlp;
		}
		atomic_add_32(&li->li_refct, 1);
		vfsp = makelfsnode(vp->v_vfsp, li);
		lp->lo_vnode = nvp;
		VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
		nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
		vn_setops(nvp, lo_vnodeops);
		nvp->v_data = (caddr_t)lp;
		lp->lo_vp = vp;
		lp->lo_looping = 0;
		lsave(lp, li);
		vn_exists(vp);
	} else {
		VN_RELE(vp);
	}

found_lnode:
	TABLE_LOCK_EXIT(vp, li);
	return (ltov(lp));
}

/*
 * Get/Make vfs structure for given real vfs
 */
static struct vfs *
makelfsnode(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;
	struct lfsnode *tlfs;

	/*
	 * Don't grab any locks for the fast (common) case.
	 */
	if (vfsp == li->li_realvfs)
		return (li->li_mountvfs);
	ASSERT(li->li_refct > 0);
	mutex_enter(&li->li_lfslock);
	if ((lfs = lfsfind(vfsp, li)) == NULL) {
		mutex_exit(&li->li_lfslock);
		lfs = kmem_zalloc(sizeof (*lfs), KM_SLEEP);
		mutex_enter(&li->li_lfslock);
		if ((tlfs = lfsfind(vfsp, li)) != NULL) {
			kmem_free(lfs, sizeof (*lfs));
			lfs = tlfs;
			goto found_lfs;
		}
		lfs->lfs_realvfs = vfsp;

		/*
		 * Even though the lfsnode is strictly speaking a private
		 * implementation detail of lofs, it should behave as a regular
		 * vfs_t for the benefit of the rest of the kernel.
		 */
		VFS_INIT(&lfs->lfs_vfs, lo_vfsops, (caddr_t)li);
		lfs->lfs_vfs.vfs_fstype = li->li_mountvfs->vfs_fstype;
		lfs->lfs_vfs.vfs_flag =
		    ((vfsp->vfs_flag | li->li_mflag) & ~li->li_dflag) &
		    INHERIT_VFS_FLAG;
		lfs->lfs_vfs.vfs_bsize = vfsp->vfs_bsize;
		lfs->lfs_vfs.vfs_dev = vfsp->vfs_dev;
		lfs->lfs_vfs.vfs_fsid = vfsp->vfs_fsid;

		if (vfsp->vfs_mntpt != NULL) {
			lfs->lfs_vfs.vfs_mntpt = vfs_getmntpoint(vfsp);
			/* Leave a reference to the mountpoint */
		}

		(void) VFS_ROOT(vfsp, &lfs->lfs_realrootvp);

		/*
		 * We use 1 instead of 0 as the value to associate with
		 * an idle lfs_vfs. This is to prevent VFS_RELE()
		 * trying to kmem_free() our lfs_t (which is the wrong
		 * size).
		 */
		VFS_HOLD(&lfs->lfs_vfs);
		lfs->lfs_next = li->li_lfs;
		li->li_lfs = lfs;
		vfs_propagate_features(vfsp, &lfs->lfs_vfs);
	}

found_lfs:
	VFS_HOLD(&lfs->lfs_vfs);
	mutex_exit(&li->li_lfslock);
	return (&lfs->lfs_vfs);
}

/*
 * Free lfs node since no longer in use
 */
static void
freelfsnode(struct lfsnode *lfs, struct loinfo *li)
{
	struct lfsnode *prev = NULL;
	struct lfsnode *this;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(li->li_refct > 0);
	for (this = li->li_lfs; this != NULL; this = this->lfs_next) {
		if (this == lfs) {
			ASSERT(lfs->lfs_vfs.vfs_count == 1);
			if (prev == NULL)
				li->li_lfs = lfs->lfs_next;
			else
				prev->lfs_next = lfs->lfs_next;
			if (lfs->lfs_realrootvp != NULL) {
				VN_RELE(lfs->lfs_realrootvp);
			}
			if (lfs->lfs_vfs.vfs_mntpt != NULL)
				refstr_rele(lfs->lfs_vfs.vfs_mntpt);
			if (lfs->lfs_vfs.vfs_implp != NULL) {
				ASSERT(lfs->lfs_vfs.vfs_femhead == NULL);
				ASSERT(lfs->lfs_vfs.vfs_vskap == NULL);
				ASSERT(lfs->lfs_vfs.vfs_fstypevsp == NULL);
				kmem_free(lfs->lfs_vfs.vfs_implp,
				    sizeof (vfs_impl_t));
			}
			sema_destroy(&lfs->lfs_vfs.vfs_reflock);
			kmem_free(lfs, sizeof (struct lfsnode));
			return;
		}
		prev = this;
	}
	panic("freelfsnode");
	/*NOTREACHED*/
}

/*
 * Find lfs given real vfs and mount instance (li)
 */
static struct lfsnode *
lfsfind(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));

	/*
	 * We need to handle the case where a UFS filesystem was forced
	 * unmounted and then a subsequent mount got the same vfs
	 * structure. If the new mount lies in the lofs hierarchy, then
	 * this will confuse lofs, because the original vfsp (of the
	 * forced unmounted filesystem) is still around. We check for
	 * this condition here.
	 *
	 * If we find a cached vfsp hit, then we check to see if the
	 * cached filesystem was forced unmounted. Skip all such
	 * entries. This should be safe to do since no
	 * makelonode()->makelfsnode()->lfsfind() calls should be
	 * generated for such force-unmounted filesystems (because (ufs)
	 * lookup would've returned an error).
	 */
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (lfs->lfs_realvfs == vfsp) {
			struct vnode *realvp;

			realvp = lfs->lfs_realrootvp;
			if (realvp == NULL)
				continue;
			if (realvp->v_vfsp == NULL || realvp->v_type == VBAD)
				continue;
			return (lfs);
		}
	}
	return (NULL);
}

/*
 * Find real vfs given loopback vfs
 */
struct vfs *
lo_realvfs(struct vfs *vfsp, struct vnode **realrootvpp)
{
	struct loinfo *li = vtoli(vfsp);
	struct lfsnode *lfs;

	ASSERT(li->li_refct > 0);
	if (vfsp == li->li_mountvfs) {
		if (realrootvpp != NULL)
			*realrootvpp = vtol(li->li_rootvp)->lo_vp;
		return (li->li_realvfs);
	}
	mutex_enter(&li->li_lfslock);
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (vfsp == &lfs->lfs_vfs) {
			if (realrootvpp != NULL)
				*realrootvpp = lfs->lfs_realrootvp;
			mutex_exit(&li->li_lfslock);
			return (lfs->lfs_realvfs);
		}
	}
	panic("lo_realvfs");
	/*NOTREACHED*/
}

/*
 * Lnode lookup stuff.
 * These routines maintain a table of lnodes hashed by vp so
 * that the lnode for a vp can be found if it already exists.
 *
 * NB: A lofs shadow vnode causes exactly one VN_HOLD() on the
 * underlying vnode.
 */

/*
 * Retire old hashtables.
 */
static void
lretire(struct loinfo *li, struct lobucket *table, uint_t size)
{
	struct lo_retired_ht *lrhp;

	lrhp = kmem_alloc(sizeof (*lrhp), KM_SLEEP);
	lrhp->lrh_table = table;
	lrhp->lrh_size = size;

	mutex_enter(&li->li_htlock);
	lrhp->lrh_next = li->li_retired;
	li->li_retired = lrhp;
	mutex_exit(&li->li_htlock);
}

/*
 * Grow the hashtable.
 */
static void
lgrow(struct loinfo *li, uint_t newsize)
{
	uint_t oldsize;
	uint_t i;
	struct lobucket *oldtable, *newtable;

	/*
	 * It's OK to not have enough memory to resize the hashtable.
	 * We'll go down this path the next time we add something to the
	 * table, and retry the allocation then.
	 */
	if ((newtable = kmem_zalloc(newsize * sizeof (*li->li_hashtable),
	    KM_NOSLEEP)) == NULL)
		return;

	mutex_enter(&li->li_htlock);
	if (newsize <= li->li_htsize) {
		mutex_exit(&li->li_htlock);
		kmem_free(newtable, newsize * sizeof (*li->li_hashtable));
		return;
	}
	oldsize = li->li_htsize;
	oldtable = li->li_hashtable;

	/*
	 * Grab all locks so TABLE_LOCK_ENTER() calls block until the
	 * resize is complete.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&oldtable[i].lh_lock);
	/*
	 * li->li_hashtable gets set before li->li_htsize, so in the
	 * time between the two assignments, callers of
	 * TABLE_LOCK_ENTER() cannot hash to a bucket beyond oldsize,
	 * hence we only need to grab the locks up to oldsize.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&newtable[i].lh_lock);
	/*
	 * Rehash.
	 */
	for (i = 0; i < oldsize; i++) {
		lnode_t *tlp, *nlp;

		for (tlp = oldtable[i].lh_chain; tlp != NULL; tlp = nlp) {
			uint_t hash = ltablehash(tlp->lo_vp, newsize);

			nlp = tlp->lo_next;
			tlp->lo_next = newtable[hash].lh_chain;
			newtable[hash].lh_chain = tlp;
			newtable[hash].lh_count++;
		}
	}

	/*
	 * As soon as we store the new hashtable, future locking operations
	 * will use it. Therefore, we must ensure that all the state we've
	 * just established reaches global visibility before the new hashtable
	 * does.
	 */
	membar_producer();
	li->li_hashtable = newtable;

	/*
	 * table_lock_enter() relies on the fact that li->li_hashtable
	 * is set to its new value before li->li_htsize.
	 */
	membar_producer();
	li->li_htsize = newsize;

	/*
	 * The new state is consistent now, so we can drop all the locks.
	 */
	for (i = 0; i < oldsize; i++) {
		mutex_exit(&newtable[i].lh_lock);
		mutex_exit(&oldtable[i].lh_lock);
	}
	mutex_exit(&li->li_htlock);

	lretire(li, oldtable, oldsize);
}

/*
 * Put a lnode in the table
 */
static void
lsave(lnode_t *lp, struct loinfo *li)
{
	ASSERT(lp->lo_vp);
	ASSERT(MUTEX_HELD(TABLE_LOCK(lp->lo_vp, li)));

#ifdef LODEBUG
	lo_dprint(4, "lsave lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif

	TABLE_COUNT(lp->lo_vp, li)++;
	lp->lo_next = TABLE_BUCKET(lp->lo_vp, li);
	TABLE_BUCKET(lp->lo_vp, li) = lp;

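	/*
	 * Illustrative numbers: with the defaults (a 64-bucket table,
	 * lo_resize_threshold of 1, lo_resize_factor of 2), the check
	 * below fires once more than 128 lnodes exist, and the table
	 * grows to 64 << 2 = 256 buckets.
	 */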
	if (li->li_refct > (li->li_htsize << lo_resize_threshold)) {
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		lgrow(li, li->li_htsize << lo_resize_factor);
		TABLE_LOCK_ENTER(lp->lo_vp, li);
	}
}

/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
	vfs_t *vfsp = &lfs->lfs_vfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(vfsp->vfs_count > 1);
	if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
		freelfsnode(lfs, li);
}

/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
	lnode_t *lt;
	lnode_t *ltprev = NULL;
	struct lfsnode *lfs, *nextlfs;
	struct vfs *vfsp;
	struct vnode *vp = ltov(lp);
	struct vnode *realvp = realvp(vp);
	struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
	lo_dprint(4, "freelonode lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif
	TABLE_LOCK_ENTER(lp->lo_vp, li);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;	/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		return;
	}
	mutex_exit(&vp->v_lock);

	for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
	    ltprev = lt, lt = lt->lo_next) {
		if (lt == lp) {
#ifdef LODEBUG
			lo_dprint(4, "freeing %p, vfsp %p\n",
			    vp, vp->v_vfsp);
#endif
			atomic_add_32(&li->li_refct, -1);
			vfsp = vp->v_vfsp;
			vn_invalid(vp);
			if (vfsp != li->li_mountvfs) {
				mutex_enter(&li->li_lfslock);
				/*
				 * Check for unused lfs
				 */
				lfs = li->li_lfs;
				while (lfs != NULL) {
					nextlfs = lfs->lfs_next;
					if (vfsp == &lfs->lfs_vfs) {
						lfs_rele(lfs, li);
						break;
					}
					if (lfs->lfs_vfs.vfs_count == 1) {
						/*
						 * Lfs is idle
						 */
						freelfsnode(lfs, li);
					}
					lfs = nextlfs;
				}
				mutex_exit(&li->li_lfslock);
			}
			if (ltprev == NULL) {
				TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
			} else {
				ltprev->lo_next = lt->lo_next;
			}
			TABLE_COUNT(lt->lo_vp, li)--;
			TABLE_LOCK_EXIT(lt->lo_vp, li);
			kmem_cache_free(lnode_cache, lt);
			vn_free(vp);
			VN_RELE(realvp);
			return;
		}
	}
	panic("freelonode");
	/*NOTREACHED*/
}

/*
 * Lookup a lnode by vp
 */
static lnode_t *
lfind(struct vnode *vp, struct loinfo *li)
{
	lnode_t *lt;

	ASSERT(MUTEX_HELD(TABLE_LOCK(vp, li)));

	lt = TABLE_BUCKET(vp, li);
	while (lt != NULL) {
		if (lt->lo_vp == vp) {
			VN_HOLD(ltov(lt));
			return (lt);
		}
		lt = lt->lo_next;
	}
	return (NULL);
}

#ifdef LODEBUG
static int lofsdebug;
#endif	/* LODEBUG */

/*
 * Utilities used by both client and server
 * Standard levels:
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */

#ifdef LODEBUG
/*VARARGS2*/
lo_dprint(level, str, a1, a2, a3, a4, a5, a6, a7, a8, a9)
	int level;
	char *str;
	int a1, a2, a3, a4, a5, a6, a7, a8, a9;
{

	if (lofsdebug == level || (lofsdebug > 10 && (lofsdebug - 10) >= level))
		printf(str, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
#endif