/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * The idea behind composition-based stacked filesystems is to add a
 * vnode to the stack of vnodes for each mount. These vnodes have their
 * own set of mount options and filesystem-specific functions, so they
 * can modify data or operations before they are passed along. Such a
 * filesystem must maintain a mapping from the underlying vnodes to its
 * interposing vnodes.
 *
 * In lofs, this mapping is implemented by a hashtable. Each bucket
 * contains a count of the number of nodes currently contained, the
 * chain of vnodes, and a lock to protect the list of vnodes. The
 * hashtable dynamically grows if the number of vnodes in the table as a
 * whole exceeds the size of the table left-shifted by
 * lo_resize_threshold. In order to minimize lock contention, there is
 * no global lock protecting the hashtable, hence obtaining the
 * per-bucket locks consists of a dance to make sure we've actually
 * locked the correct bucket. Acquiring a bucket lock doesn't involve
 * locking the hashtable itself, so we refrain from freeing old
 * hashtables, and store them in a linked list of retired hashtables;
 * the list is freed when the filesystem is unmounted.
 */
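
/*
 * For reference, each bucket (struct lobucket, from the lofs headers
 * included below) carries exactly the three pieces described above:
 * the lh_lock protecting the bucket, the lh_count of entries, and the
 * lh_chain of lnodes, as used by the TABLE_* macros that follow.
 */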

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/t_lock.h>
#include <sys/debug.h>
#include <sys/atomic.h>

#include <sys/fs/lofs_node.h>
#include <sys/fs/lofs_info.h>
/*
 * Due to the hashing algorithm, the size of the hash table needs to be a
 * power of 2.
 */
#define LOFS_DEFAULT_HTSIZE	(1 << 6)

#define ltablehash(vp, tblsz)	((((intptr_t)(vp))>>10) & ((tblsz)-1))

/*
 * The following macros can only be safely used when the desired bucket
 * is already locked.
 */
/*
 * The lock in the hashtable associated with the given vnode.
 */
#define TABLE_LOCK(vp, li)	\
        (&(li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_lock)

/*
 * The bucket in the hashtable that the given vnode hashes to.
 */
#define TABLE_BUCKET(vp, li)	\
        ((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_chain)

/*
 * Number of elements currently in the bucket that the vnode hashes to.
 */
#define TABLE_COUNT(vp, li)	\
        ((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_count)

/*
 * Grab/Drop the lock for the bucket this vnode hashes to.
 */
#define TABLE_LOCK_ENTER(vp, li)	table_lock_enter(vp, li)
#define TABLE_LOCK_EXIT(vp, li)		\
        mutex_exit(&(li)->li_hashtable[ltablehash((vp),	\
            (li)->li_htsize)].lh_lock)

static lnode_t *lfind(struct vnode *, struct loinfo *);
static void lsave(lnode_t *, struct loinfo *);
static struct vfs *makelfsnode(struct vfs *, struct loinfo *);
static struct lfsnode *lfsfind(struct vfs *, struct loinfo *);

uint_t lo_resize_threshold = 1;
uint_t lo_resize_factor = 2;

static kmem_cache_t *lnode_cache;
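
/*
 * A worked example of the tunables above, assuming the defaults: with
 * LOFS_DEFAULT_HTSIZE = 64 and lo_resize_threshold = 1, lsave()
 * triggers a resize once the mount holds more than (64 << 1) = 128
 * lnodes, and lgrow() is then asked for (64 << 2) = 256 buckets,
 * since lo_resize_factor = 2.
 */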

/*
 * Since the hashtable itself isn't protected by a lock, obtaining a
 * per-bucket lock proceeds as follows:
 *
 * (a) li->li_htlock protects li->li_hashtable, li->li_htsize, and
 * li->li_retired.
 *
 * (b) Per-bucket locks (lh_lock) protect the contents of the bucket.
 *
 * (c) Locking order for resizing the hashtable is li_htlock then
 * lh_lock.
 *
 * To grab the bucket lock we:
 *
 * (1) Stash away the htsize and the pointer to the hashtable to make
 * sure neither changes while we're using them.
 *
 * (2) lgrow() updates the pointer to the hashtable before it updates
 * the size: the worst case scenario is that we have the wrong size (but
 * the correct table), so we hash to the wrong bucket, grab the wrong
 * lock, and then realize that things have changed, rewind and start
 * again. If both the size and the table changed since we loaded them,
 * we'll realize that too and restart.
 *
 * (3) The protocol for growing the hashtable involves holding *all* the
 * locks in the table, hence the unlocking code (TABLE_LOCK_EXIT())
 * doesn't need to do any dances, since neither the table nor the size
 * can change while any bucket lock is held.
 *
 * (4) If the hashtable is growing (by thread t1) while another thread
 * (t2) is trying to grab a bucket lock, t2 might have a stale reference
 * to li->li_htsize:
 *
 * - t1 grabs all locks in lgrow()
 * - t2 loads li->li_htsize and li->li_hashtable
 * - t1 changes li->li_hashtable
 * - t2 loads from an offset in the "stale" hashtable and tries to grab
 * the relevant mutex.
 *
 * If t1 had freed the stale hashtable, t2 would be in trouble. Hence,
 * stale hashtables are not freed but stored in a list of "retired"
 * hashtables, which is emptied when the filesystem is unmounted.
 */
static void
table_lock_enter(vnode_t *vp, struct loinfo *li)
{
        struct lobucket *chain;
        uint_t htsize;
        uint_t hash;

        for (;;) {
                htsize = li->li_htsize;
                membar_consumer();
                chain = (struct lobucket *)li->li_hashtable;
                hash = ltablehash(vp, htsize);
                mutex_enter(&chain[hash].lh_lock);
                if (li->li_hashtable == chain && li->li_htsize == htsize)
                        break;
                mutex_exit(&chain[hash].lh_lock);
        }
}
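
/*
 * Note that the membar_consumer() in table_lock_enter() pairs with the
 * membar_producer() calls in lgrow(): if we observe the new li_htsize
 * we are guaranteed to also observe the new li_hashtable, so the
 * stale-size/current-table case described in (2) above is the worst
 * mismatch we can see.
 */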

void
lofs_subrinit(void)
{
        /*
         * Initialize the cache.
         */
        lnode_cache = kmem_cache_create("lnode_cache", sizeof (lnode_t),
            0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lofs_subrfini(void)
{
        kmem_cache_destroy(lnode_cache);
}

/*
 * Initialize a (struct loinfo), and initialize the hashtable to have
 * htsize buckets.
 */
void
lsetup(struct loinfo *li, uint_t htsize)
{
        li->li_refct = 0;
        li->li_lfs = NULL;
        if (htsize == 0)
                htsize = LOFS_DEFAULT_HTSIZE;
        li->li_htsize = htsize;
        li->li_hashtable = kmem_zalloc(htsize * sizeof (*li->li_hashtable),
            KM_SLEEP);
        mutex_init(&li->li_lfslock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&li->li_htlock, NULL, MUTEX_DEFAULT, NULL);
        li->li_retired = NULL;
}

/*
 * Destroy a (struct loinfo)
 */
void
ldestroy(struct loinfo *li)
{
        uint_t i, htsize;
        struct lobucket *table;
        struct lo_retired_ht *lrhp, *trhp;

        mutex_destroy(&li->li_htlock);
        mutex_destroy(&li->li_lfslock);
        htsize = li->li_htsize;
        table = li->li_hashtable;
        for (i = 0; i < htsize; i++)
                mutex_destroy(&table[i].lh_lock);
        kmem_free(table, htsize * sizeof (*li->li_hashtable));

        /*
         * Free the retired hashtables.
         */
        lrhp = li->li_retired;
        while (lrhp != NULL) {
                trhp = lrhp;
                lrhp = lrhp->lrh_next;
                kmem_free(trhp->lrh_table,
                    trhp->lrh_size * sizeof (*li->li_hashtable));
                kmem_free(trhp, sizeof (*trhp));
        }
        li->li_retired = NULL;
}
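
/*
 * lsetup() and ldestroy() bracket the lifetime of a mount: lsetup()
 * runs at mount time (htsize == 0 selects LOFS_DEFAULT_HTSIZE),
 * makelonode()/freelonode() populate and drain the table while the
 * mount is live, and ldestroy() runs at unmount, which is the one
 * point where retired hashtables are finally freed.
 */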

/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li)
{
        lnode_t *lp, *tlp;
        struct vfs *vfsp;
        vnode_t *nvp;

        TABLE_LOCK_ENTER(vp, li);
        if ((lp = lfind(vp, li)) == NULL) {
                /*
                 * Optimistically assume that we won't need to sleep.
                 */
                lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
                nvp = vn_alloc(KM_NOSLEEP);
                if (lp == NULL || nvp == NULL) {
                        TABLE_LOCK_EXIT(vp, li);
                        /* The lnode allocation may have succeeded, save it */
                        tlp = lp;
                        if (tlp == NULL) {
                                tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
                        }
                        if (nvp == NULL) {
                                nvp = vn_alloc(KM_SLEEP);
                        }
                        TABLE_LOCK_ENTER(vp, li);
                        if ((lp = lfind(vp, li)) != NULL) {
                                kmem_cache_free(lnode_cache, tlp);
                                vn_free(nvp);
                                VN_RELE(vp);
                                goto found_lnode;
                        }
                        lp = tlp;
                }
                atomic_add_32(&li->li_refct, 1);
                vfsp = makelfsnode(vp->v_vfsp, li);
                lp->lo_vnode = nvp;
                VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
                nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
                vn_setops(nvp, lo_vnodeops);
                nvp->v_data = (caddr_t)lp;
                lp->lo_vp = vp;
                lp->lo_looping = 0;
                lsave(lp, li);
                vn_exists(vp);
        } else {
                VN_RELE(vp);
        }

found_lnode:
        TABLE_LOCK_EXIT(vp, li);
        return (ltov(lp));
}
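
/*
 * Note the hold accounting in makelonode(): the caller passes in a
 * held vp, and a shadow vnode holds its real vnode exactly once. On
 * the create path the caller's hold is simply inherited by the new
 * lnode; on the found path, where lfind() returned a shadow that
 * already holds vp, the extra hold is dropped with VN_RELE().
 */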

/*
 * Get/Make vfs structure for given real vfs
 */
static struct vfs *
makelfsnode(struct vfs *vfsp, struct loinfo *li)
{
        struct lfsnode *lfs;
        struct lfsnode *tlfs;

        /*
         * Don't grab any locks for the fast (common) case.
         */
        if (vfsp == li->li_realvfs)
                return (li->li_mountvfs);
        ASSERT(li->li_refct > 0);
        mutex_enter(&li->li_lfslock);
        if ((lfs = lfsfind(vfsp, li)) == NULL) {
                mutex_exit(&li->li_lfslock);
                lfs = kmem_zalloc(sizeof (*lfs), KM_SLEEP);
                mutex_enter(&li->li_lfslock);
                if ((tlfs = lfsfind(vfsp, li)) != NULL) {
                        kmem_free(lfs, sizeof (*lfs));
                        lfs = tlfs;
                        goto found_lfs;
                }
                lfs->lfs_realvfs = vfsp;

                /*
                 * Even though the lfsnode is strictly speaking a private
                 * implementation detail of lofs, it should behave as a regular
                 * vfs_t for the benefit of the rest of the kernel.
                 */
                VFS_INIT(&lfs->lfs_vfs, lo_vfsops, (caddr_t)li);
                lfs->lfs_vfs.vfs_fstype = li->li_mountvfs->vfs_fstype;
                lfs->lfs_vfs.vfs_flag =
                    ((vfsp->vfs_flag | li->li_mflag) & ~li->li_dflag) &
                    INHERIT_VFS_FLAG;
                lfs->lfs_vfs.vfs_bsize = vfsp->vfs_bsize;
                lfs->lfs_vfs.vfs_dev = vfsp->vfs_dev;
                lfs->lfs_vfs.vfs_fsid = vfsp->vfs_fsid;

                if (vfsp->vfs_mntpt != NULL) {
                        lfs->lfs_vfs.vfs_mntpt = vfs_getmntpoint(vfsp);
                        /* Leave a reference to the mountpoint */
                }

                (void) VFS_ROOT(vfsp, &lfs->lfs_realrootvp);

                /*
                 * We use 1 instead of 0 as the value to associate with
                 * an idle lfs_vfs.  This is to prevent VFS_RELE()
                 * trying to kmem_free() our lfs_t (which is the wrong
                 * size).
                 */
                VFS_HOLD(&lfs->lfs_vfs);
                lfs->lfs_next = li->li_lfs;
                li->li_lfs = lfs;
        }

found_lfs:
        VFS_HOLD(&lfs->lfs_vfs);
        mutex_exit(&li->li_lfslock);
        return (&lfs->lfs_vfs);
}
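
/*
 * Taken together with lfs_rele() below, the effect is that
 * lfs_vfs.vfs_count is biased by one: a count of 1 means the lfsnode
 * is idle, and each lnode referencing it accounts for one count above
 * that.
 */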

/*
 * Free lfs node since no longer in use
 */
static void
freelfsnode(struct lfsnode *lfs, struct loinfo *li)
{
        struct lfsnode *prev = NULL;
        struct lfsnode *this;

        ASSERT(MUTEX_HELD(&li->li_lfslock));
        ASSERT(li->li_refct > 0);
        for (this = li->li_lfs; this != NULL; this = this->lfs_next) {
                if (this == lfs) {
                        ASSERT(lfs->lfs_vfs.vfs_count == 1);
                        if (prev == NULL)
                                li->li_lfs = lfs->lfs_next;
                        else
                                prev->lfs_next = lfs->lfs_next;
                        if (lfs->lfs_realrootvp != NULL) {
                                VN_RELE(lfs->lfs_realrootvp);
                        }
                        if (lfs->lfs_vfs.vfs_mntpt != NULL)
                                refstr_rele(lfs->lfs_vfs.vfs_mntpt);
                        sema_destroy(&lfs->lfs_vfs.vfs_reflock);
                        kmem_free(lfs, sizeof (struct lfsnode));
                        return;
                }
                prev = this;
        }
        panic("freelfsnode");
        /*NOTREACHED*/
}

/*
 * Find lfs given real vfs and mount instance(li)
 */
static struct lfsnode *
lfsfind(struct vfs *vfsp, struct loinfo *li)
{
        struct lfsnode *lfs;

        ASSERT(MUTEX_HELD(&li->li_lfslock));

        /*
         * We need to handle the case where a UFS filesystem was forced
         * unmounted and then a subsequent mount got the same vfs
         * structure.  If the new mount lies in the lofs hierarchy, then
         * this will confuse lofs, because the original vfsp (of the
         * forced unmounted filesystem) is still around. We check for
         * this condition here.
         *
         * If we find a cache vfsp hit, then we check to see if the
         * cached filesystem was forced unmounted. Skip all such
         * entries. This should be safe to do since no
         * makelonode()->makelfsnode()->lfsfind() calls should be
         * generated for such force-unmounted filesystems (because (ufs)
         * lookup would've returned an error).
         */
        for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
                if (lfs->lfs_realvfs == vfsp) {
                        struct vnode *realvp;

                        realvp = lfs->lfs_realrootvp;
                        if (realvp == NULL)
                                continue;
                        if (realvp->v_vfsp == NULL || realvp->v_type == VBAD)
                                continue;
                        return (lfs);
                }
        }
        return (NULL);
}
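
/*
 * Entries skipped above (stale lfsnodes for force-unmounted
 * filesystems) are not reclaimed here; they linger on li_lfs until
 * their last lnode goes away and the idle-lfs sweep in freelonode()
 * hands them to freelfsnode().
 */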

/*
 * Find real vfs given loopback vfs
 */
struct vfs *
lo_realvfs(struct vfs *vfsp, struct vnode **realrootvpp)
{
        struct loinfo *li = vtoli(vfsp);
        struct lfsnode *lfs;

        ASSERT(li->li_refct > 0);
        if (vfsp == li->li_mountvfs) {
                if (realrootvpp != NULL)
                        *realrootvpp = vtol(li->li_rootvp)->lo_vp;
                return (li->li_realvfs);
        }
        mutex_enter(&li->li_lfslock);
        for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
                if (vfsp == &lfs->lfs_vfs) {
                        if (realrootvpp != NULL)
                                *realrootvpp = lfs->lfs_realrootvp;
                        mutex_exit(&li->li_lfslock);
                        return (lfs->lfs_realvfs);
                }
        }
        panic("lo_realvfs");
        /*NOTREACHED*/
}

/*
 * Lnode lookup stuff.
 * These routines maintain a table of lnodes hashed by vp so
 * that the lnode for a vp can be found if it already exists.
 *
 * NB: A lofs shadow vnode causes exactly one VN_HOLD() on the
 * underlying vnode.
 */

/*
 * Retire old hashtables.
 */
static void
lretire(struct loinfo *li, struct lobucket *table, uint_t size)
{
        struct lo_retired_ht *lrhp;

        lrhp = kmem_alloc(sizeof (*lrhp), KM_SLEEP);
        lrhp->lrh_table = table;
        lrhp->lrh_size = size;

        mutex_enter(&li->li_htlock);
        lrhp->lrh_next = li->li_retired;
        li->li_retired = lrhp;
        mutex_exit(&li->li_htlock);
}
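
/*
 * The cost of retiring tables instead of freeing them is modest: with
 * the default lo_resize_factor of 2 each table has four times as many
 * buckets as its predecessor, so the retired list for a current table
 * of S buckets totals roughly S/4 + S/16 + ... < S/3 buckets.
 */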

/*
 * Grow the hashtable.
 */
static void
lgrow(struct loinfo *li, uint_t newsize)
{
        uint_t oldsize;
        uint_t i;
        struct lobucket *oldtable, *newtable;

        /*
         * It's OK to not have enough memory to resize the hashtable.
         * We'll go down this path the next time we add something to the
         * table, and retry the allocation then.
         */
        if ((newtable = kmem_zalloc(newsize * sizeof (*li->li_hashtable),
            KM_NOSLEEP)) == NULL)
                return;

        mutex_enter(&li->li_htlock);
        if (newsize <= li->li_htsize) {
                mutex_exit(&li->li_htlock);
                kmem_free(newtable, newsize * sizeof (*li->li_hashtable));
                return;
        }
        oldsize = li->li_htsize;
        oldtable = li->li_hashtable;

        /*
         * Grab all locks so TABLE_LOCK_ENTER() calls block until the
         * resize is complete.
         */
        for (i = 0; i < oldsize; i++)
                mutex_enter(&oldtable[i].lh_lock);
        /*
         * li->li_hashtable gets set before li->li_htsize, so in the
         * time between the two assignments, callers of
         * TABLE_LOCK_ENTER() cannot hash to a bucket beyond oldsize,
         * hence we only need to grab the locks up to oldsize.
         */
        for (i = 0; i < oldsize; i++)
                mutex_enter(&newtable[i].lh_lock);
        /*
         * Rehash.
         */
        for (i = 0; i < oldsize; i++) {
                lnode_t *tlp, *nlp;

                for (tlp = oldtable[i].lh_chain; tlp != NULL; tlp = nlp) {
                        uint_t hash = ltablehash(tlp->lo_vp, newsize);

                        nlp = tlp->lo_next;
                        tlp->lo_next = newtable[hash].lh_chain;
                        newtable[hash].lh_chain = tlp;
                        newtable[hash].lh_count++;
                }
        }

        /*
         * As soon as we store the new hashtable, future locking operations
         * will use it.  Therefore, we must ensure that all the state we've
         * just established reaches global visibility before the new hashtable
         * does.
         */
        membar_producer();
        li->li_hashtable = newtable;

        /*
         * table_lock_enter() relies on the fact that li->li_hashtable
         * is set to its new value before li->li_htsize.
         */
        membar_producer();
        li->li_htsize = newsize;

        /*
         * The new state is consistent now, so we can drop all the locks.
         */
        for (i = 0; i < oldsize; i++) {
                mutex_exit(&newtable[i].lh_lock);
                mutex_exit(&oldtable[i].lh_lock);
        }
        mutex_exit(&li->li_htlock);

        lretire(li, oldtable, oldsize);
}
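
/*
 * To trace a concrete run (assuming the default tunables): the first
 * grow on a mount takes li_htlock, the 64 old bucket locks and the
 * first 64 locks of the new 256-bucket table, rehashes every lnode,
 * publishes the new table and then the new size, drops all the locks,
 * and finally retires the 64-bucket table.
 */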

/*
 * Put a lnode in the table
 */
static void
lsave(lnode_t *lp, struct loinfo *li)
{
        ASSERT(lp->lo_vp);
        ASSERT(MUTEX_HELD(TABLE_LOCK(lp->lo_vp, li)));

#ifdef LODEBUG
        lo_dprint(4, "lsave lp %p hash %d\n",
            lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif

        TABLE_COUNT(lp->lo_vp, li)++;
        lp->lo_next = TABLE_BUCKET(lp->lo_vp, li);
        TABLE_BUCKET(lp->lo_vp, li) = lp;

        if (li->li_refct > (li->li_htsize << lo_resize_threshold)) {
                TABLE_LOCK_EXIT(lp->lo_vp, li);
                lgrow(li, li->li_htsize << lo_resize_factor);
                TABLE_LOCK_ENTER(lp->lo_vp, li);
        }
}
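
/*
 * Note that lsave() drops the bucket lock around the call to lgrow():
 * lgrow() takes li_htlock and then every bucket lock (the order given
 * in (c) above), so calling it while still holding a bucket lock would
 * deadlock. The lnode is already hashed in at that point, so dropping
 * the lock is safe.
 */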

/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
        vfs_t *vfsp = &lfs->lfs_vfs;

        ASSERT(MUTEX_HELD(&li->li_lfslock));
        ASSERT(vfsp->vfs_count > 1);
        if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
                freelfsnode(lfs, li);
}

/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
        lnode_t *lt;
        lnode_t *ltprev = NULL;
        struct lfsnode *lfs, *nextlfs;
        struct vfs *vfsp;
        struct vnode *vp = ltov(lp);
        struct vnode *realvp = realvp(vp);
        struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
        lo_dprint(4, "freelonode lp %p hash %d\n",
            lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif
        TABLE_LOCK_ENTER(lp->lo_vp, li);

        mutex_enter(&vp->v_lock);
        if (vp->v_count > 1) {
                vp->v_count--;	/* release our hold from vn_rele */
                mutex_exit(&vp->v_lock);
                TABLE_LOCK_EXIT(lp->lo_vp, li);
                return;
        }
        mutex_exit(&vp->v_lock);

        for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
            ltprev = lt, lt = lt->lo_next) {
                if (lt == lp) {
#ifdef LODEBUG
                        lo_dprint(4, "freeing %p, vfsp %p\n",
                            vp, vp->v_vfsp);
#endif
                        atomic_add_32(&li->li_refct, -1);
                        vfsp = vp->v_vfsp;
                        vn_invalid(vp);
                        if (vfsp != li->li_mountvfs) {
                                mutex_enter(&li->li_lfslock);
                                /*
                                 * Check for unused lfs
                                 */
                                lfs = li->li_lfs;
                                while (lfs != NULL) {
                                        nextlfs = lfs->lfs_next;
                                        if (vfsp == &lfs->lfs_vfs) {
                                                lfs_rele(lfs, li);
                                                break;
                                        }
                                        if (lfs->lfs_vfs.vfs_count == 1) {
                                                /*
                                                 * Lfs is idle
                                                 */
                                                freelfsnode(lfs, li);
                                        }
                                        lfs = nextlfs;
                                }
                                mutex_exit(&li->li_lfslock);
                        }
                        if (ltprev == NULL) {
                                TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
                        } else {
                                ltprev->lo_next = lt->lo_next;
                        }
                        TABLE_COUNT(lt->lo_vp, li)--;
                        TABLE_LOCK_EXIT(lt->lo_vp, li);
                        kmem_cache_free(lnode_cache, lt);
                        vn_free(vp);
                        VN_RELE(realvp);
                        return;
                }
        }
        panic("freelonode");
        /*NOTREACHED*/
}

/*
 * Lookup a lnode by vp
 */
static lnode_t *
lfind(struct vnode *vp, struct loinfo *li)
{
        lnode_t *lt;

        ASSERT(MUTEX_HELD(TABLE_LOCK(vp, li)));

        lt = TABLE_BUCKET(vp, li);
        while (lt != NULL) {
                if (lt->lo_vp == vp) {
                        VN_HOLD(ltov(lt));
                        return (lt);
                }
                lt = lt->lo_next;
        }
        return (NULL);
}

#ifdef LODEBUG
static int lofsdebug;
#endif /* LODEBUG */

/*
 * Utilities used by both client and server
 * Standard levels:
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */

#ifdef LODEBUG
/*VARARGS2*/
void
lo_dprint(int level, char *str, int a1, int a2, int a3, int a4, int a5,
    int a6, int a7, int a8, int a9)
{
        if (lofsdebug == level || (lofsdebug > 10 && (lofsdebug - 10) >= level))
                printf(str, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
#endif
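
/*
 * A worked example of the level scheme above, derived from the
 * condition in lo_dprint(): lofsdebug = 4 prints only the level-4
 * messages, while lofsdebug = 14 prints everything at levels 1
 * through 4.
 */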