/*	$NetBSD: ulfs_dirhash.c,v 1.17 2016/06/20 01:53:38 dholland Exp $	*/
/*  from NetBSD: ufs_dirhash.c,v 1.37 2014/12/20 00:28:05 christos Exp  */

/*
 * Copyright (c) 2001, 2002 Ian Dowse.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/ufs/ufs/ufs_dirhash.c,v 1.3.2.8 2004/12/08 11:54:13 dwmalone Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_dirhash.c,v 1.17 2016/06/20 01:53:38 dholland Exp $");

/*
 * This implements a hash-based lookup scheme for ULFS directories.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/hash.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/atomic.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfs_dirhash.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>

#define WRAPINCR(val, limit)	(((val) + 1 == (limit)) ? 0 : ((val) + 1))
#define WRAPDECR(val, limit)	(((val) == 0) ? ((limit) - 1) : ((val) - 1))
#define OFSFMT(ip)		((ip)->i_lfs->um_maxsymlinklen <= 0)
#define BLKFREE2IDX(n)		((n) > DH_NFSTATS ? DH_NFSTATS : (n))
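
/*
 * WRAPINCR and WRAPDECR step a probe index forward or backward modulo
 * `limit' (e.g. WRAPINCR(slot, dh_hlen) wraps from dh_hlen - 1 back to 0);
 * they implement the wrap-around for the linear-probing loops below.
 * BLKFREE2IDX clamps a per-block free-space count (in DIRALIGN units) to
 * DH_NFSTATS, the last index of the dh_firstfree[] summary array, so that
 * all blocks with plenty of free space share a single summary slot.
 */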

static u_int ulfs_dirhashminblks = 5;
static u_int ulfs_dirhashmaxmem = 2 * 1024 * 1024;
static u_int ulfs_dirhashmem;
static u_int ulfs_dirhashcheck = 0;

static int ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen);
static void ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff,
	   int dirblksiz);
static void ulfsdirhash_delslot(struct dirhash *dh, int slot);
static int ulfsdirhash_findslot(struct dirhash *dh, const char *name,
	   int namelen, doff_t offset);
static doff_t ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dp,
	   doff_t offset, int dirblksiz);
static int ulfsdirhash_recycle(int wanted);

static pool_cache_t ulfsdirhashblk_cache;
static pool_cache_t ulfsdirhash_cache;

#define DIRHASHLIST_LOCK()	mutex_enter(&ulfsdirhash_lock)
#define DIRHASHLIST_UNLOCK()	mutex_exit(&ulfsdirhash_lock)
#define DIRHASH_LOCK(dh)	mutex_enter(&(dh)->dh_lock)
#define DIRHASH_UNLOCK(dh)	mutex_exit(&(dh)->dh_lock)
#define DIRHASH_BLKALLOC() \
    pool_cache_get(ulfsdirhashblk_cache, PR_NOWAIT)
#define DIRHASH_BLKFREE(ptr) \
    pool_cache_put(ulfsdirhashblk_cache, ptr)

/* Dirhash list; recently-used entries are near the tail. */
static TAILQ_HEAD(, dirhash) ulfsdirhash_list;

/* Protects: ulfsdirhash_list, `dh_list' field, ulfs_dirhashmem. */
static kmutex_t ulfsdirhash_lock;

static struct sysctllog *ulfsdirhash_sysctl_log;

/*
 * Locking order:
 *	ulfsdirhash_lock
 *	dh_lock
 *
 * The dh_lock mutex should be acquired either via the inode lock, or via
 * ulfsdirhash_lock. Only the owner of the inode may free the associated
 * dirhash, but anything can steal its memory and set dh_hash to NULL.
 */

/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ulfsdirhash_build(struct inode *ip)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh;
	struct buf *bp = NULL;
	LFS_DIRHEADER *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (ip->i_size < (ulfs_dirhashminblks * dirblksiz) || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (ip->i_size < (ulfs_dirhashminblks * dirblksiz) ||
		    ulfs_dirhashmem > ulfs_dirhashmaxmem) {
			ulfsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ulfsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_nlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
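	/*
	 * nslots is sized from the smallest possible entry
	 * (LFS_DIRECTSIZ(fs, 1)), so it upper-bounds the number of entries
	 * the directory can hold, then grown by 50% and rounded up to whole
	 * hash arrays of DH_NBLKOFF slots; nblocks is likewise 50% more
	 * than the current number of dirblksiz blocks, leaving headroom for
	 * the directory to grow before the hash must be rebuilt.
	 */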
	KASSERT(ip->i_size >= dirblksiz);
	nslots = ip->i_size / LFS_DIRECTSIZ(fs, 1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(ip->i_size, dirblksiz);
	nblocks = (dirblocks * 3 + 1) / 2;

	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);

	while (atomic_add_int_nv(&ulfs_dirhashmem, memreqd) >
	    ulfs_dirhashmaxmem) {
		atomic_add_int(&ulfs_dirhashmem, -memreqd);
		if (memreqd > ulfs_dirhashmaxmem / 2)
			return (-1);
		/* Try to free some space. */
		if (ulfsdirhash_recycle(memreqd) != 0)
			return (-1);
		else
			DIRHASHLIST_UNLOCK();
	}

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = pool_cache_get(ulfsdirhash_cache, PR_NOWAIT);
	if (dh == NULL) {
		atomic_add_int(&ulfs_dirhashmem, -memreqd);
		return (-1);
	}
	memset(dh, 0, sizeof(*dh));
	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
	DIRHASH_LOCK(dh);
	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < ip->i_size) {
		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp, 0);
			if (ulfs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (LFS_DIRHEADER *)((char *)bp->b_data + (pos & bmask));
		if (lfs_dir_getreclen(fs, ep) == 0 || lfs_dir_getreclen(fs, ep) >
		    dirblksiz - (pos & (dirblksiz - 1))) {
			/* Corrupted directory. */
			brelse(bp, 0);
			goto fail;
		}
		if (lfs_dir_getino(fs, ep) != 0) {
			/* Add the entry (simplified ulfsdirhash_add). */
			slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, ep),
			    lfs_dir_getnamlen(fs, ep));
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ulfsdirhash_adjfree(dh, pos, -LFS_DIRSIZ(fs, ep),
			    dirblksiz);
		}
		pos += lfs_dir_getreclen(fs, ep);
	}

	if (bp != NULL)
		brelse(bp, 0);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ulfsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASH_UNLOCK(dh);
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	ip->i_dirhash = NULL;
	DIRHASH_UNLOCK(dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
	}
	if (dh->dh_blkfree != NULL)
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ulfsdirhash_cache, dh);
	atomic_add_int(&ulfs_dirhashmem, -memreqd);
	return (-1);
}

/*
 * Free any hash table associated with inode 'ip'.
 */
void
ulfsdirhash_free(struct inode *ip)
{
	struct dirhash *dh;
	int i, mem;

	if ((dh = ip->i_dirhash) == NULL)
		return;

	ip->i_dirhash = NULL;

	if (dh->dh_onlist) {
		DIRHASHLIST_LOCK();
		if (dh->dh_onlist)
			TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
		DIRHASHLIST_UNLOCK();
	}

	/* The dirhash pointed to by 'dh' is exclusively ours now. */
	mem = sizeof(*dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < dh->dh_narrays; i++)
			DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
		mem += dh->dh_hashsz;
		mem += dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash);
		mem += dh->dh_nblk * sizeof(*dh->dh_blkfree);
	}
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ulfsdirhash_cache, dh);

	atomic_add_int(&ulfs_dirhashmem, -mem);
}

/*
 * Find the offset of the specified name within the given inode.
 * Returns 0 on success, ENOENT if the entry does not exist, or
 * EJUSTRETURN if the caller should revert to a linear search.
 *
 * If successful, the directory offset is stored in *offp, and a
 * pointer to a struct buf containing the entry is stored in *bpp. If
 * prevoffp is non-NULL, the offset of the previous entry within
 * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry
 * is the first in a block, the start of the block is used).
 */
int
ulfsdirhash_lookup(struct inode *ip, const char *name, int namelen, doff_t *offp,
    struct buf **bpp, doff_t *prevoffp)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh, *dh_next;
	LFS_DIRHEADER *dp;
	struct vnode *vp;
	struct buf *bp;
	doff_t blkoff, bmask, offset, prevoff;
	int i, slot;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return (EJUSTRETURN);

	/*
	 * Move this dirhash towards the end of the list if it has a
	 * score higher than the next entry, and acquire the dh_lock.
	 * Optimise the case where it's already the last by performing
	 * an unlocked read of the TAILQ_NEXT pointer.
	 *
	 * In both cases, end up holding just dh_lock.
	 */
	if (TAILQ_NEXT(dh, dh_list) != NULL) {
		DIRHASHLIST_LOCK();
		DIRHASH_LOCK(dh);
		/*
		 * If the new score will be greater than that of the next
		 * entry, then move this entry past it. With both mutexes
		 * held, dh_next won't go away, but its dh_score could
		 * change; that's not important since it is just a hint.
		 */
		if (dh->dh_hash != NULL &&
		    (dh_next = TAILQ_NEXT(dh, dh_list)) != NULL &&
		    dh->dh_score >= dh_next->dh_score) {
			KASSERT(dh->dh_onlist);
			TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
			TAILQ_INSERT_AFTER(&ulfsdirhash_list, dh_next, dh,
			    dh_list);
		}
		DIRHASHLIST_UNLOCK();
	} else {
		/* Already the last, though that could change as we wait. */
		DIRHASH_LOCK(dh);
	}
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return (EJUSTRETURN);
	}

	/* Update the score. */
	if (dh->dh_score < DH_SCOREMAX)
		dh->dh_score++;

	vp = ip->i_vnode;
	bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	blkoff = -1;
	bp = NULL;
restart:
	slot = ulfsdirhash_hash(dh, name, namelen);

	if (dh->dh_seqopt) {
		/*
		 * Sequential access optimisation. dh_seqoff contains the
		 * offset of the directory entry immediately following
		 * the last entry that was looked up. Check if this offset
		 * appears in the hash chain for the name we are looking for.
		 */
		for (i = slot; (offset = DH_ENTRY(dh, i)) != DIRHASH_EMPTY;
		    i = WRAPINCR(i, dh->dh_hlen))
			if (offset == dh->dh_seqoff)
				break;
		if (offset == dh->dh_seqoff) {
			/*
			 * We found an entry with the expected offset. This
			 * is probably the entry we want, but if not, the
			 * code below will turn off seqoff and retry.
			 */
			slot = i;
		} else
			dh->dh_seqopt = 0;
	}

	for (; (offset = DH_ENTRY(dh, slot)) != DIRHASH_EMPTY;
	    slot = WRAPINCR(slot, dh->dh_hlen)) {
		if (offset == DIRHASH_DEL)
			continue;

		if (offset < 0 || offset >= ip->i_size)
			panic("ulfsdirhash_lookup: bad offset in hash array");
		if ((offset & ~bmask) != blkoff) {
			if (bp != NULL)
				brelse(bp, 0);
			blkoff = offset & ~bmask;
			if (ulfs_blkatoff(vp, (off_t)blkoff,
			    NULL, &bp, false) != 0) {
				DIRHASH_UNLOCK(dh);
				return (EJUSTRETURN);
			}
		}
		dp = (LFS_DIRHEADER *)((char *)bp->b_data + (offset & bmask));
		if (lfs_dir_getreclen(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) >
		    dirblksiz - (offset & (dirblksiz - 1))) {
			/* Corrupted directory. */
			DIRHASH_UNLOCK(dh);
			brelse(bp, 0);
			return (EJUSTRETURN);
		}
		if (lfs_dir_getnamlen(fs, dp) == namelen &&
		    memcmp(lfs_dir_nameptr(fs, dp), name, namelen) == 0) {
			/* Found. Get the prev offset if needed. */
			if (prevoffp != NULL) {
				if (offset & (dirblksiz - 1)) {
					prevoff = ulfsdirhash_getprev(fs, dp,
					    offset, dirblksiz);
					if (prevoff == -1) {
						brelse(bp, 0);
						return (EJUSTRETURN);
					}
				} else
					prevoff = offset;
				*prevoffp = prevoff;
			}

			/* Check for sequential access, and update offset. */
			if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset)
				dh->dh_seqopt = 1;
			dh->dh_seqoff = offset + LFS_DIRSIZ(fs, dp);
			DIRHASH_UNLOCK(dh);

			*bpp = bp;
			*offp = offset;
			return (0);
		}

		if (dh->dh_hash == NULL) {
			DIRHASH_UNLOCK(dh);
			if (bp != NULL)
				brelse(bp, 0);
			ulfsdirhash_free(ip);
			return (EJUSTRETURN);
		}
		/*
		 * When the name doesn't match in the seqopt case, go back
		 * and search normally.
		 */
		if (dh->dh_seqopt) {
			dh->dh_seqopt = 0;
			goto restart;
		}
	}
	DIRHASH_UNLOCK(dh);
	if (bp != NULL)
		brelse(bp, 0);
	return (ENOENT);
}

/*
 * Find a directory block with room for 'slotneeded' bytes. Returns
 * the offset of the directory entry that begins the free space.
 * This will either be the offset of an existing entry that has free
 * space at the end, or the offset of an entry with d_ino == 0 at
 * the start of a DIRBLKSIZ block.
 *
 * To use the space, the caller may need to compact existing entries in
 * the directory. The total number of bytes in all of the entries involved
 * in the compaction is stored in *slotsize. In other words, all of
 * the entries that must be compacted are exactly contained in the
 * region beginning at the returned offset and spanning *slotsize bytes.
 *
 * Returns -1 if no space was found, indicating that the directory
 * must be extended.
 */
doff_t
ulfsdirhash_findfree(struct inode *ip, int slotneeded, int *slotsize)
{
	struct lfs *fs = ip->i_lfs;
	LFS_DIRHEADER *dp;
	struct dirhash *dh;
	struct buf *bp;
	doff_t pos, slotstart;
	int dirblock, error, freebytes, i;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return (-1);

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return (-1);
	}

	/* Find a directory block with the desired free space. */
	dirblock = -1;
	for (i = howmany(slotneeded, DIRALIGN); i <= DH_NFSTATS; i++)
		if ((dirblock = dh->dh_firstfree[i]) != -1)
			break;
	if (dirblock == -1) {
		DIRHASH_UNLOCK(dh);
		return (-1);
	}

	KASSERT(dirblock < dh->dh_nblk &&
	    dh->dh_blkfree[dirblock] >= howmany(slotneeded, DIRALIGN));
	pos = dirblock * dirblksiz;
	error = ulfs_blkatoff(ip->i_vnode, (off_t)pos, (void *)&dp, &bp, false);
	if (error) {
		DIRHASH_UNLOCK(dh);
		return (-1);
	}
	/* Find the first entry with free space. */
	for (i = 0; i < dirblksiz; ) {
		if (lfs_dir_getreclen(fs, dp) == 0) {
			DIRHASH_UNLOCK(dh);
			brelse(bp, 0);
			return (-1);
		}
		if (lfs_dir_getino(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) > LFS_DIRSIZ(fs, dp))
			break;
		i += lfs_dir_getreclen(fs, dp);
		dp = LFS_NEXTDIR(fs, dp);
	}
	if (i > dirblksiz) {
		DIRHASH_UNLOCK(dh);
		brelse(bp, 0);
		return (-1);
	}
	slotstart = pos + i;

	/* Find the range of entries needed to get enough space */
	freebytes = 0;
	while (i < dirblksiz && freebytes < slotneeded) {
		freebytes += lfs_dir_getreclen(fs, dp);
		if (lfs_dir_getino(fs, dp) != 0)
			freebytes -= LFS_DIRSIZ(fs, dp);
		if (lfs_dir_getreclen(fs, dp) == 0) {
			DIRHASH_UNLOCK(dh);
			brelse(bp, 0);
			return (-1);
		}
		i += lfs_dir_getreclen(fs, dp);
		dp = LFS_NEXTDIR(fs, dp);
	}
	if (i > dirblksiz) {
		DIRHASH_UNLOCK(dh);
		brelse(bp, 0);
		return (-1);
	}
	if (freebytes < slotneeded)
		panic("ulfsdirhash_findfree: free mismatch");
	DIRHASH_UNLOCK(dh);
	brelse(bp, 0);
	*slotsize = pos + i - slotstart;
	return (slotstart);
}
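
/*
 * Free-space bookkeeping used by ulfsdirhash_findfree() above and
 * maintained by ulfsdirhash_adjfree() below: dh_blkfree[block] counts the
 * free bytes in each DIRBLKSIZ block in DIRALIGN units, and
 * dh_firstfree[idx] records the first block whose clamped count
 * BLKFREE2IDX(dh_blkfree[block]) equals idx, or -1 if there is none.
 * dh_firstfree[DH_NFSTATS] therefore covers every block with at least
 * DH_NFSTATS * DIRALIGN free bytes, which is why findfree only has to
 * scan the DH_NFSTATS + 1 summary slots instead of every block.
 */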

/*
 * Return the start of the unused space at the end of a directory, or
 * -1 if there are no trailing unused blocks.
 */
doff_t
ulfsdirhash_enduseful(struct inode *ip)
{
	struct dirhash *dh;
	int i;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return (-1);

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return (-1);
	}

	if (dh->dh_blkfree[dh->dh_dirblks - 1] != dirblksiz / DIRALIGN) {
		DIRHASH_UNLOCK(dh);
		return (-1);
	}

	for (i = dh->dh_dirblks - 1; i >= 0; i--)
		if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
			break;
	DIRHASH_UNLOCK(dh);
	return ((doff_t)(i + 1) * dirblksiz);
}

/*
 * Insert information into the hash about a new directory entry. dirp
 * points to the entry's directory header (LFS_DIRHEADER), and offset
 * specifies the offset of this entry.
 */
void
ulfsdirhash_add(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh;
	int slot;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return;

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	KASSERT(offset < dh->dh_dirblks * dirblksiz);
	/*
	 * Normal hash usage is < 66%. If the usage gets too high then
	 * remove the hash entirely and let it be rebuilt later.
	 */
	if (dh->dh_hused >= (dh->dh_hlen * 3) / 4) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	/* Find a free hash slot (empty or deleted), and add the entry. */
	slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, dirp),
	    lfs_dir_getnamlen(fs, dirp));
	while (DH_ENTRY(dh, slot) >= 0)
		slot = WRAPINCR(slot, dh->dh_hlen);
	if (DH_ENTRY(dh, slot) == DIRHASH_EMPTY)
		dh->dh_hused++;
	DH_ENTRY(dh, slot) = offset;

	/* Update the per-block summary info. */
	ulfsdirhash_adjfree(dh, offset, -LFS_DIRSIZ(fs, dirp), dirblksiz);
	DIRHASH_UNLOCK(dh);
}

/*
 * Remove the specified directory entry from the hash. The entry to remove
 * is defined by the name in `dirp', which must exist at the specified
 * `offset' within the directory.
 */
void
ulfsdirhash_remove(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh;
	int slot;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return;

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	KASSERT(offset < dh->dh_dirblks * dirblksiz);
	/* Find the entry */
	slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
	    lfs_dir_getnamlen(fs, dirp), offset);

	/* Remove the hash entry. */
	ulfsdirhash_delslot(dh, slot);

	/* Update the per-block summary info. */
	ulfsdirhash_adjfree(dh, offset, LFS_DIRSIZ(fs, dirp), dirblksiz);
	DIRHASH_UNLOCK(dh);
}
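
/*
 * Slot values in the hash arrays are either non-negative directory
 * offsets or one of two negative sentinels: DIRHASH_EMPTY (free) and
 * DIRHASH_DEL (deleted tombstone).  ulfsdirhash_add() above probes with
 * `DH_ENTRY(dh, slot) >= 0', so it reuses deleted slots as well as empty
 * ones, while dh_hused counts every slot that is not DIRHASH_EMPTY;
 * ulfsdirhash_delslot() below converts a trailing run of DIRHASH_DEL
 * slots back to DIRHASH_EMPTY once the probe chain ends.
 */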

/*
 * Change the offset associated with a directory entry in the hash. Used
 * when compacting directory blocks.
 */
void
ulfsdirhash_move(struct inode *ip, LFS_DIRHEADER *dirp, doff_t oldoff,
    doff_t newoff)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh;
	int slot;

	if ((dh = ip->i_dirhash) == NULL)
		return;
	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	KASSERT(oldoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz &&
	    newoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz);
	/* Find the entry, and update the offset. */
	slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
	    lfs_dir_getnamlen(fs, dirp), oldoff);
	DH_ENTRY(dh, slot) = newoff;
	DIRHASH_UNLOCK(dh);
}

/*
 * Inform dirhash that the directory has grown by one block that
 * begins at offset (i.e. the new length is offset + DIRBLKSIZ).
 */
void
ulfsdirhash_newblk(struct inode *ip, doff_t offset)
{
	struct dirhash *dh;
	int block;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return;
	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	KASSERT(offset == dh->dh_dirblks * dirblksiz);
	block = offset / dirblksiz;
	if (block >= dh->dh_nblk) {
		/* Out of space; must rebuild. */
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}
	dh->dh_dirblks = block + 1;

	/* Account for the new free block. */
	dh->dh_blkfree[block] = dirblksiz / DIRALIGN;
	if (dh->dh_firstfree[DH_NFSTATS] == -1)
		dh->dh_firstfree[DH_NFSTATS] = block;
	DIRHASH_UNLOCK(dh);
}

/*
 * Inform dirhash that the directory is being truncated.
 */
void
ulfsdirhash_dirtrunc(struct inode *ip, doff_t offset)
{
	struct dirhash *dh;
	int block, i;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return;

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	KASSERT(offset <= dh->dh_dirblks * dirblksiz);
	block = howmany(offset, dirblksiz);
	/*
	 * If the directory shrinks to less than 1/8 of dh_nblk blocks
	 * (about 20% of its original size due to the 50% extra added in
	 * ulfsdirhash_build) then free it, and let the caller rebuild
	 * if necessary.
	 */
	if (block < dh->dh_nblk / 8 && dh->dh_narrays > 1) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	/*
	 * Remove any `first free' information pertaining to the
	 * truncated blocks. All blocks we're removing should be
	 * completely unused.
	 */
	if (dh->dh_firstfree[DH_NFSTATS] >= block)
		dh->dh_firstfree[DH_NFSTATS] = -1;
	for (i = block; i < dh->dh_dirblks; i++)
		if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
			panic("ulfsdirhash_dirtrunc: blocks in use");
	for (i = 0; i < DH_NFSTATS; i++)
		if (dh->dh_firstfree[i] >= block)
			panic("ulfsdirhash_dirtrunc: first free corrupt");
	dh->dh_dirblks = block;
	DIRHASH_UNLOCK(dh);
}
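
/*
 * The consistency check below is disabled by default; it only runs when
 * ulfs_dirhashcheck is non-zero, which can be set through the
 * vfs.ulfs.dirhash.docheck sysctl created in ulfsdirhash_sysctl_init()
 * near the bottom of this file.
 */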
/*
 * Debugging function to check that the dirhash information about
 * a directory block matches its actual contents. Panics if a mismatch
 * is detected.
 *
 * On entry, `sbuf' should point to the start of an in-core
 * DIRBLKSIZ-sized directory block, and `offset' should contain the
 * offset from the start of the directory of that block.
 */
void
ulfsdirhash_checkblock(struct inode *ip, char *sbuf, doff_t offset)
{
	struct lfs *fs = ip->i_lfs;
	struct dirhash *dh;
	LFS_DIRHEADER *dp;
	int block, ffslot, i, nfree;
	int dirblksiz = ip->i_lfs->um_dirblksiz;

	if (!ulfs_dirhashcheck)
		return;
	if ((dh = ip->i_dirhash) == NULL)
		return;

	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}

	block = offset / dirblksiz;
	if ((offset & (dirblksiz - 1)) != 0 || block >= dh->dh_dirblks)
		panic("ulfsdirhash_checkblock: bad offset");

	nfree = 0;
	for (i = 0; i < dirblksiz; i += lfs_dir_getreclen(fs, dp)) {
		dp = (LFS_DIRHEADER *)(sbuf + i);
		if (lfs_dir_getreclen(fs, dp) == 0 || i + lfs_dir_getreclen(fs, dp) > dirblksiz)
			panic("ulfsdirhash_checkblock: bad dir");

		if (lfs_dir_getino(fs, dp) == 0) {
#if 0
			/*
			 * XXX entries with d_ino == 0 should only occur
			 * at the start of a DIRBLKSIZ block. However the
			 * ulfs code is tolerant of such entries at other
			 * offsets, and fsck does not fix them.
			 */
			if (i != 0)
				panic("ulfsdirhash_checkblock: bad dir inode");
#endif
			nfree += lfs_dir_getreclen(fs, dp);
			continue;
		}

		/* Check that the entry exists (will panic if it doesn't). */
		ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dp),
		    lfs_dir_getnamlen(fs, dp),
		    offset + i);

		nfree += lfs_dir_getreclen(fs, dp) - LFS_DIRSIZ(fs, dp);
	}
	if (i != dirblksiz)
		panic("ulfsdirhash_checkblock: bad dir end");

	if (dh->dh_blkfree[block] * DIRALIGN != nfree)
		panic("ulfsdirhash_checkblock: bad free count");

	ffslot = BLKFREE2IDX(nfree / DIRALIGN);
	for (i = 0; i <= DH_NFSTATS; i++)
		if (dh->dh_firstfree[i] == block && i != ffslot)
			panic("ulfsdirhash_checkblock: bad first-free");
	if (dh->dh_firstfree[ffslot] == -1)
		panic("ulfsdirhash_checkblock: missing first-free entry");
	DIRHASH_UNLOCK(dh);
}

/*
 * Hash the specified filename into a dirhash slot.
 */
static int
ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen)
{
	u_int32_t hash;

	/*
	 * We hash the name and then some other bit of data that is
	 * invariant over the dirhash's lifetime. Otherwise names
	 * differing only in the last byte are placed close to one
	 * another in the table, which is bad for linear probing.
	 */
	hash = hash32_buf(name, namelen, HASH32_BUF_INIT);
	hash = hash32_buf(&dh, sizeof(dh), hash);
	return (hash % dh->dh_hlen);
}

/*
 * Adjust the number of free bytes in the block containing `offset'
 * by the value specified by `diff'.
 *
 * The caller must ensure we have exclusive access to `dh'; normally
 * that means that dh_lock should be held, but this is also called
 * from ulfsdirhash_build() where exclusive access can be assumed.
 */
static void
ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff, int dirblksiz)
{
	int block, i, nfidx, ofidx;

	KASSERT(mutex_owned(&dh->dh_lock));

	/* Update the per-block summary info. */
	block = offset / dirblksiz;
	KASSERT(block < dh->dh_nblk && block < dh->dh_dirblks);
	ofidx = BLKFREE2IDX(dh->dh_blkfree[block]);
	dh->dh_blkfree[block] = (int)dh->dh_blkfree[block] + (diff / DIRALIGN);
	nfidx = BLKFREE2IDX(dh->dh_blkfree[block]);

	/* Update the `first free' list if necessary. */
	if (ofidx != nfidx) {
		/* If removing, scan forward for the next block. */
		if (dh->dh_firstfree[ofidx] == block) {
			for (i = block + 1; i < dh->dh_dirblks; i++)
				if (BLKFREE2IDX(dh->dh_blkfree[i]) == ofidx)
					break;
			dh->dh_firstfree[ofidx] = (i < dh->dh_dirblks) ? i : -1;
		}

		/* Make this the new `first free' if necessary */
		if (dh->dh_firstfree[nfidx] > block ||
		    dh->dh_firstfree[nfidx] == -1)
			dh->dh_firstfree[nfidx] = block;
	}
}

/*
 * Find the specified name which should have the specified offset.
 * Returns a slot number, and panics on failure.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static int
ulfsdirhash_findslot(struct dirhash *dh, const char *name, int namelen,
    doff_t offset)
{
	int slot;

	KASSERT(mutex_owned(&dh->dh_lock));

	/* Find the entry. */
	KASSERT(dh->dh_hused < dh->dh_hlen);
	slot = ulfsdirhash_hash(dh, name, namelen);
	while (DH_ENTRY(dh, slot) != offset &&
	    DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
		slot = WRAPINCR(slot, dh->dh_hlen);
	if (DH_ENTRY(dh, slot) != offset)
		panic("ulfsdirhash_findslot: '%.*s' not found", namelen, name);

	return (slot);
}

/*
 * Remove the entry corresponding to the specified slot from the hash array.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static void
ulfsdirhash_delslot(struct dirhash *dh, int slot)
{
	int i;

	KASSERT(mutex_owned(&dh->dh_lock));

	/* Mark the entry as deleted. */
	DH_ENTRY(dh, slot) = DIRHASH_DEL;

	/* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
	for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
		i = WRAPINCR(i, dh->dh_hlen);
	if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
		i = WRAPDECR(i, dh->dh_hlen);
		while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
			DH_ENTRY(dh, i) = DIRHASH_EMPTY;
			dh->dh_hused--;
			i = WRAPDECR(i, dh->dh_hlen);
		}
		KASSERT(dh->dh_hused >= 0);
	}
}

/*
 * Given a directory entry and its offset, find the offset of the
 * previous entry in the same DIRBLKSIZ-sized block. Returns an
 * offset, or -1 if there is no previous entry in the block or some
 * other problem occurred.
 */
static doff_t
ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dirp,
    doff_t offset, int dirblksiz)
{
	LFS_DIRHEADER *dp;
	char *blkbuf;
	doff_t blkoff, prevoff;
	int entrypos, i;
	unsigned reclen;

	blkoff = offset & ~(dirblksiz - 1);	/* offset of start of block */
	entrypos = offset & (dirblksiz - 1);	/* entry relative to block */
	blkbuf = (char *)dirp - entrypos;
	prevoff = blkoff;

	/* If `offset' is the start of a block, there is no previous entry. */
	if (entrypos == 0)
		return (-1);

	/* Scan from the start of the block until we get to the entry. */
	for (i = 0; i < entrypos; i += reclen) {
		dp = (LFS_DIRHEADER *)(blkbuf + i);
		reclen = lfs_dir_getreclen(fs, dp);
		if (reclen == 0 || i + reclen > entrypos)
			return (-1);	/* Corrupted directory. */
		prevoff = blkoff + i;
	}
	return (prevoff);
}
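
/*
 * Note on the function below: it is called from ulfsdirhash_build() when
 * the memory budget would be exceeded, and on success it returns 0 with
 * ulfsdirhash_lock still held, which is why that caller follows a zero
 * return with DIRHASHLIST_UNLOCK().
 */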
/*
 * Try to free up `wanted' bytes by stealing memory from existing
 * dirhashes. Returns zero with list locked if successful.
 */
static int
ulfsdirhash_recycle(int wanted)
{
	struct dirhash *dh;
	doff_t **hash;
	u_int8_t *blkfree;
	int i, mem, narrays;
	size_t hashsz, blkfreesz;

	DIRHASHLIST_LOCK();
	while (wanted + ulfs_dirhashmem > ulfs_dirhashmaxmem) {
		/* Find a dirhash, and lock it. */
		if ((dh = TAILQ_FIRST(&ulfsdirhash_list)) == NULL) {
			DIRHASHLIST_UNLOCK();
			return (-1);
		}
		DIRHASH_LOCK(dh);
		KASSERT(dh->dh_hash != NULL);

		/* Decrement the score; only recycle if it becomes zero. */
		if (--dh->dh_score > 0) {
			DIRHASH_UNLOCK(dh);
			DIRHASHLIST_UNLOCK();
			return (-1);
		}

		/* Remove it from the list and detach its memory. */
		TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
		dh->dh_onlist = 0;
		hash = dh->dh_hash;
		hashsz = dh->dh_hashsz;
		dh->dh_hash = NULL;
		blkfree = dh->dh_blkfree;
		blkfreesz = dh->dh_blkfreesz;
		dh->dh_blkfree = NULL;
		narrays = dh->dh_narrays;
		mem = narrays * sizeof(*dh->dh_hash) +
		    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
		    dh->dh_nblk * sizeof(*dh->dh_blkfree);

		/* Unlock everything, free the detached memory. */
		DIRHASH_UNLOCK(dh);
		DIRHASHLIST_UNLOCK();

		for (i = 0; i < narrays; i++)
			DIRHASH_BLKFREE(hash[i]);
		kmem_free(hash, hashsz);
		kmem_free(blkfree, blkfreesz);

		/* Account for the returned memory, and repeat if necessary. */
		DIRHASHLIST_LOCK();
		atomic_add_int(&ulfs_dirhashmem, -mem);
	}
	/* Success. */
	return (0);
}

static void
ulfsdirhash_sysctl_init(void)
{
	const struct sysctlnode *rnode, *cnode;

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, NULL, &rnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ulfs",
		       SYSCTL_DESCR("ulfs"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, CTL_CREATE, CTL_EOL);

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &rnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "dirhash",
		       SYSCTL_DESCR("dirhash"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "minblocks",
		       SYSCTL_DESCR("minimum hashed directory size in blocks"),
		       NULL, 0, &ulfs_dirhashminblks, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxmem",
		       SYSCTL_DESCR("maximum dirhash memory usage"),
		       NULL, 0, &ulfs_dirhashmaxmem, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "memused",
		       SYSCTL_DESCR("current dirhash memory usage"),
		       NULL, 0, &ulfs_dirhashmem, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "docheck",
		       SYSCTL_DESCR("enable extra sanity checks"),
		       NULL, 0, &ulfs_dirhashcheck, 0,
		       CTL_CREATE, CTL_EOL);
}

void
ulfsdirhash_init(void)
{

	mutex_init(&ulfsdirhash_lock, MUTEX_DEFAULT, IPL_NONE);
	ulfsdirhashblk_cache = pool_cache_init(DH_NBLKOFF * sizeof(daddr_t), 0,
	    0, 0, "dirhashblk", NULL, IPL_NONE, NULL, NULL, NULL);
	ulfsdirhash_cache = pool_cache_init(sizeof(struct dirhash), 0,
	    0, 0, "dirhash", NULL, IPL_NONE, NULL, NULL, NULL);
	TAILQ_INIT(&ulfsdirhash_list);
	ulfsdirhash_sysctl_init();
}

void
ulfsdirhash_done(void)
{

	KASSERT(TAILQ_EMPTY(&ulfsdirhash_list));
	pool_cache_destroy(ulfsdirhashblk_cache);
	pool_cache_destroy(ulfsdirhash_cache);
	mutex_destroy(&ulfsdirhash_lock);
	sysctl_teardown(&ulfsdirhash_sysctl_log);
}
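
/*
 * Illustrative sketch only (kept under #if 0, like the disabled check in
 * ulfsdirhash_checkblock above): how a hypothetical caller would drive
 * this API, following the return conventions documented on
 * ulfsdirhash_build() and ulfsdirhash_lookup().  `example_lookup' and
 * `example_linear_scan' are made-up names and are not part of this file
 * or of the real ulfs directory code.
 */
#if 0
static int
example_lookup(struct inode *ip, const char *name, int namelen,
    doff_t *offp, struct buf **bpp)
{
	int error;

	/* Build (or revalidate) the hash; -1 means "use a linear scan". */
	if (ulfsdirhash_build(ip) == 0) {
		error = ulfsdirhash_lookup(ip, name, namelen, offp, bpp, NULL);
		if (error != EJUSTRETURN)
			return (error);	/* 0 (found, *bpp held) or ENOENT */
	}

	/* No usable hash: fall back to scanning the directory linearly. */
	return (example_linear_scan(ip, name, namelen, offp, bpp));
}
#endif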