1 /*	$NetBSD: ulfs_dirhash.c,v 1.19 2022/08/07 02:33:47 simonb Exp $	*/
2 /*  from NetBSD: ufs_dirhash.c,v 1.37 2014/12/20 00:28:05 christos Exp  */
3 
4 /*
5  * Copyright (c) 2001, 2002 Ian Dowse.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/ufs/ufs/ufs_dirhash.c,v 1.3.2.8 2004/12/08 11:54:13 dwmalone Exp $
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_dirhash.c,v 1.19 2022/08/07 02:33:47 simonb Exp $");
33 
34 /*
35  * This implements a hash-based lookup scheme for ULFS directories.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/kmem.h>
42 #include <sys/types.h>
43 #include <sys/hash.h>
44 #include <sys/proc.h>
45 #include <sys/buf.h>
46 #include <sys/vnode.h>
47 #include <sys/mount.h>
48 #include <sys/pool.h>
49 #include <sys/sysctl.h>
50 #include <sys/atomic.h>
51 
52 #include <ufs/lfs/lfs.h>
53 #include <ufs/lfs/lfs_accessors.h>
54 #include <ufs/lfs/ulfs_inode.h>
55 #include <ufs/lfs/ulfs_dirhash.h>
56 #include <ufs/lfs/ulfsmount.h>
57 #include <ufs/lfs/ulfs_bswap.h>
58 #include <ufs/lfs/ulfs_extern.h>
59 
60 /*
61  * Defaults for dirhash cache sizes:
62  *  - use up to 1/64th of system memory.
63  *  - disable dirhash (set the cache size to 0 bytes) if the
64  *    calculated size is less than 2MB.
65  *  - cap maximum size of the dirhash cache at 32MB.
66  */
67 #define	DIRHASH_DEFAULT_DIVIDER	64
68 #define	MIN_DEFAULT_DIRHASH_MEM	(2 * 1024 * 1024)
69 #define	MAX_DEFAULT_DIRHASH_MEM	(32 * 1024 * 1024)
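
/*
 * Worked example of the defaults above (computed in ulfsdirhash_init()):
 * with 1 GiB of RAM the limit is 1 GiB / 64 = 16 MiB; with 4 GiB the
 * computed 64 MiB is capped at 32 MiB; with 64 MiB of RAM the computed
 * 1 MiB falls below the 2 MiB minimum, so dirhash is disabled.
 */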
70 
71 
72 #define WRAPINCR(val, limit)	(((val) + 1 == (limit)) ? 0 : ((val) + 1))
73 #define WRAPDECR(val, limit)	(((val) == 0) ? ((limit) - 1) : ((val) - 1))
74 #define OFSFMT(ip)		((ip)->i_lfs->um_maxsymlinklen <= 0)
75 #define BLKFREE2IDX(n)		((n) > DH_NFSTATS ? DH_NFSTATS : (n))
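
/*
 * WRAPINCR/WRAPDECR step circularly through the hash array during linear
 * probing; e.g. with a table of dh_hlen == 8 slots, WRAPINCR(7, 8) == 0
 * and WRAPDECR(0, 8) == 7.
 */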
76 
77 static u_int ulfs_dirhashminblks = 5;
78 static u_int ulfs_dirhashmaxmem = 0;
79 static u_int ulfs_dirhashmem;
80 static u_int ulfs_dirhashcheck = 0;
81 
82 static int ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen);
83 static void ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff,
84 	   int dirblksiz);
85 static void ulfsdirhash_delslot(struct dirhash *dh, int slot);
86 static int ulfsdirhash_findslot(struct dirhash *dh, const char *name,
87 	   int namelen, doff_t offset);
88 static doff_t ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dp,
89 	   doff_t offset, int dirblksiz);
90 static int ulfsdirhash_recycle(int wanted);
91 
92 static pool_cache_t ulfsdirhashblk_cache;
93 static pool_cache_t ulfsdirhash_cache;
94 
95 #define DIRHASHLIST_LOCK()		mutex_enter(&ulfsdirhash_lock)
96 #define DIRHASHLIST_UNLOCK()		mutex_exit(&ulfsdirhash_lock)
97 #define DIRHASH_LOCK(dh)		mutex_enter(&(dh)->dh_lock)
98 #define DIRHASH_UNLOCK(dh)		mutex_exit(&(dh)->dh_lock)
99 #define DIRHASH_BLKALLOC()		\
100     pool_cache_get(ulfsdirhashblk_cache, PR_NOWAIT)
101 #define DIRHASH_BLKFREE(ptr)		\
102     pool_cache_put(ulfsdirhashblk_cache, ptr)
103 
104 /* Dirhash list; recently-used entries are near the tail. */
105 static TAILQ_HEAD(, dirhash) ulfsdirhash_list;
106 
107 /* Protects: ulfsdirhash_list, `dh_list' field, ulfs_dirhashmem. */
108 static kmutex_t ulfsdirhash_lock;
109 
110 static struct sysctllog *ulfsdirhash_sysctl_log;
111 
112 /*
113  * Locking order:
114  *	ulfsdirhash_lock
115  *	dh_lock
116  *
117  * The dh_lock mutex should be acquired either via the inode lock, or via
118  * ulfsdirhash_lock. Only the owner of the inode may free the associated
119  * dirhash, but anything can steal its memory and set dh_hash to NULL.
120  */
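
/*
 * Illustrative sketch (not extra code in this file) of the check made at
 * the top of most consumers below: after taking dh_lock, a NULL dh_hash
 * means ulfsdirhash_recycle() reclaimed the memory, so the stale dirhash
 * is discarded and the caller falls back to a linear lookup.
 */
#if 0
	if ((dh = ip->i_dirhash) == NULL)
		return;
	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		/* Recycled underneath us; only the inode owner frees it. */
		DIRHASH_UNLOCK(dh);
		ulfsdirhash_free(ip);
		return;
	}
#endif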
121 
122 /*
123  * Attempt to build up a hash table for the directory contents in
124  * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
125  */
126 int
127 ulfsdirhash_build(struct inode *ip)
128 {
129 	struct lfs *fs = ip->i_lfs;
130 	struct dirhash *dh;
131 	struct buf *bp = NULL;
132 	LFS_DIRHEADER *ep;
133 	struct vnode *vp;
134 	doff_t bmask, pos;
135 	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
136 	int dirblksiz = ip->i_lfs->um_dirblksiz;
137 
138 	/* Check if we can/should use dirhash. */
139 	if (ip->i_dirhash == NULL) {
140 		if (ulfs_dirhashmaxmem == 0 ||
141 		    ip->i_size < (ulfs_dirhashminblks * dirblksiz) ||
142 		    OFSFMT(ip))
143 			return (-1);
144 	} else {
145 		/* Hash exists, but sysctls could have changed. */
146 		if (ip->i_size < (ulfs_dirhashminblks * dirblksiz) ||
147 		    ulfs_dirhashmem > ulfs_dirhashmaxmem) {
148 			ulfsdirhash_free(ip);
149 			return (-1);
150 		}
151 		/* Check if hash exists and is intact (note: unlocked read). */
152 		if (ip->i_dirhash->dh_hash != NULL)
153 			return (0);
154 		/* Free the old, recycled hash and build a new one. */
155 		ulfsdirhash_free(ip);
156 	}
157 
158 	/* Don't hash removed directories. */
159 	if (ip->i_nlink == 0)
160 		return (-1);
161 
162 	vp = ip->i_vnode;
163 	/* Allocate 50% more entries than this dir size could ever need. */
164 	KASSERT(ip->i_size >= dirblksiz);
165 	nslots = ip->i_size / LFS_DIRECTSIZ(fs, 1);
166 	nslots = (nslots * 3 + 1) / 2;
167 	narrays = howmany(nslots, DH_NBLKOFF);
168 	nslots = narrays * DH_NBLKOFF;
169 	dirblocks = howmany(ip->i_size, dirblksiz);
170 	nblocks = (dirblocks * 3 + 1) / 2;
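
	/*
	 * Illustration with arbitrary sizes: a 16 KB directory with
	 * 512-byte directory blocks gives dirblocks = 32 and nblocks = 48;
	 * the raw slot estimate is likewise scaled by 3/2 and then rounded
	 * up to a whole number of DH_NBLKOFF-sized hash arrays.
	 */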
171 
172 	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
173 	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
174 	    nblocks * sizeof(*dh->dh_blkfree);
175 
176 	while (atomic_add_int_nv(&ulfs_dirhashmem, memreqd) >
177 	    ulfs_dirhashmaxmem) {
178 		atomic_add_int(&ulfs_dirhashmem, -memreqd);
179 		if (memreqd > ulfs_dirhashmaxmem / 2)
180 			return (-1);
181 		/* Try to free some space. */
182 		if (ulfsdirhash_recycle(memreqd) != 0)
183 			return (-1);
184 		else
185 			DIRHASHLIST_UNLOCK();
186 	}
187 
188 	/*
189 	 * Use non-blocking allocations so that we will revert to a linear
190 	 * lookup on failure rather than potentially blocking forever.
191 	 */
192 	dh = pool_cache_get(ulfsdirhash_cache, PR_NOWAIT);
193 	if (dh == NULL) {
194 		atomic_add_int(&ulfs_dirhashmem, -memreqd);
195 		return (-1);
196 	}
197 	memset(dh, 0, sizeof(*dh));
198 	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
199 	DIRHASH_LOCK(dh);
200 	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
201 	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
202 	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
203 	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
204 	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
205 		goto fail;
206 	for (i = 0; i < narrays; i++) {
207 		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
208 			goto fail;
209 		for (j = 0; j < DH_NBLKOFF; j++)
210 			dh->dh_hash[i][j] = DIRHASH_EMPTY;
211 	}
212 
213 	/* Initialise the hash table and block statistics. */
214 	dh->dh_narrays = narrays;
215 	dh->dh_hlen = nslots;
216 	dh->dh_nblk = nblocks;
217 	dh->dh_dirblks = dirblocks;
218 	for (i = 0; i < dirblocks; i++)
219 		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
220 	for (i = 0; i < DH_NFSTATS; i++)
221 		dh->dh_firstfree[i] = -1;
222 	dh->dh_firstfree[DH_NFSTATS] = 0;
223 	dh->dh_seqopt = 0;
224 	dh->dh_seqoff = 0;
225 	dh->dh_score = DH_SCOREINIT;
226 	ip->i_dirhash = dh;
227 
228 	bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
229 	pos = 0;
230 	while (pos < ip->i_size) {
231 		preempt_point();
232 
233 		/* If necessary, get the next directory block. */
234 		if ((pos & bmask) == 0) {
235 			if (bp != NULL)
236 				brelse(bp, 0);
237 			if (ulfs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
238 				goto fail;
239 		}
240 
241 		/* Add this entry to the hash. */
242 		ep = (LFS_DIRHEADER *)((char *)bp->b_data + (pos & bmask));
243 		if (lfs_dir_getreclen(fs, ep) == 0 || lfs_dir_getreclen(fs, ep) >
244 		    dirblksiz - (pos & (dirblksiz - 1))) {
245 			/* Corrupted directory. */
246 			brelse(bp, 0);
247 			goto fail;
248 		}
249 		if (lfs_dir_getino(fs, ep) != 0) {
250 			/* Add the entry (simplified ulfsdirhash_add). */
251 			slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, ep),
252 						lfs_dir_getnamlen(fs, ep));
253 			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
254 				slot = WRAPINCR(slot, dh->dh_hlen);
255 			dh->dh_hused++;
256 			DH_ENTRY(dh, slot) = pos;
257 			ulfsdirhash_adjfree(dh, pos, -LFS_DIRSIZ(fs, ep),
258 			    dirblksiz);
259 		}
260 		pos += lfs_dir_getreclen(fs, ep);
261 	}
262 
263 	if (bp != NULL)
264 		brelse(bp, 0);
265 	DIRHASHLIST_LOCK();
266 	TAILQ_INSERT_TAIL(&ulfsdirhash_list, dh, dh_list);
267 	dh->dh_onlist = 1;
268 	DIRHASH_UNLOCK(dh);
269 	DIRHASHLIST_UNLOCK();
270 	return (0);
271 
272 fail:
273 	ip->i_dirhash = NULL;
274 	DIRHASH_UNLOCK(dh);
275 	if (dh->dh_hash != NULL) {
276 		for (i = 0; i < narrays; i++)
277 			if (dh->dh_hash[i] != NULL)
278 				DIRHASH_BLKFREE(dh->dh_hash[i]);
279 		kmem_free(dh->dh_hash, dh->dh_hashsz);
280 	}
281 	if (dh->dh_blkfree != NULL)
282 		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
283 	mutex_destroy(&dh->dh_lock);
284 	pool_cache_put(ulfsdirhash_cache, dh);
285 	atomic_add_int(&ulfs_dirhashmem, -memreqd);
286 	return (-1);
287 }
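
/*
 * Illustrative caller sketch (placeholder variable names, not part of
 * this file): a lookup path is expected to try the hash first and fall
 * back to a linear directory scan when EJUSTRETURN is returned.
 */
#if 0
	if (ulfsdirhash_build(dp) == 0) {
		error = ulfsdirhash_lookup(dp, cnp->cn_nameptr,
		    cnp->cn_namelen, &foundoff, &bp, NULL);
		if (error != EJUSTRETURN)
			return error;	/* 0: found, ENOENT: not present */
	}
	/* ... otherwise scan the directory linearly ... */
#endif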
288 
289 /*
290  * Free any hash table associated with inode 'ip'.
291  */
292 void
293 ulfsdirhash_free(struct inode *ip)
294 {
295 	struct dirhash *dh;
296 	int i, mem;
297 
298 	if ((dh = ip->i_dirhash) == NULL)
299 		return;
300 
301 	ip->i_dirhash = NULL;
302 
303 	if (dh->dh_onlist) {
304 		DIRHASHLIST_LOCK();
305 		if (dh->dh_onlist)
306 			TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
307 		DIRHASHLIST_UNLOCK();
308 	}
309 
310 	/* The dirhash pointed to by 'dh' is exclusively ours now. */
311 	mem = sizeof(*dh);
312 	if (dh->dh_hash != NULL) {
313 		for (i = 0; i < dh->dh_narrays; i++)
314 			DIRHASH_BLKFREE(dh->dh_hash[i]);
315 		kmem_free(dh->dh_hash, dh->dh_hashsz);
316 		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
317 		mem += dh->dh_hashsz;
318 		mem += dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash);
319 		mem += dh->dh_nblk * sizeof(*dh->dh_blkfree);
320 	}
321 	mutex_destroy(&dh->dh_lock);
322 	pool_cache_put(ulfsdirhash_cache, dh);
323 
324 	atomic_add_int(&ulfs_dirhashmem, -mem);
325 }
326 
327 /*
328  * Find the offset of the specified name within the given inode.
329  * Returns 0 on success, ENOENT if the entry does not exist, or
330  * EJUSTRETURN if the caller should revert to a linear search.
331  *
332  * If successful, the directory offset is stored in *offp, and a
333  * pointer to a struct buf containing the entry is stored in *bpp. If
334  * prevoffp is non-NULL, the offset of the previous entry within
335  * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry
336  * is the first in a block, the start of the block is used).
337  */
338 int
339 ulfsdirhash_lookup(struct inode *ip, const char *name, int namelen, doff_t *offp,
340     struct buf **bpp, doff_t *prevoffp)
341 {
342 	struct lfs *fs = ip->i_lfs;
343 	struct dirhash *dh, *dh_next;
344 	LFS_DIRHEADER *dp;
345 	struct vnode *vp;
346 	struct buf *bp;
347 	doff_t blkoff, bmask, offset, prevoff;
348 	int i, slot;
349 	int dirblksiz = ip->i_lfs->um_dirblksiz;
350 
351 	if ((dh = ip->i_dirhash) == NULL)
352 		return (EJUSTRETURN);
353 
354 	/*
355 	 * Move this dirhash towards the end of the list if it has a
356 	 * score higher than the next entry, and acquire the dh_lock.
357 	 * Optimise the case where it's already the last by performing
358 	 * an unlocked read of the TAILQ_NEXT pointer.
359 	 *
360 	 * In both cases, end up holding just dh_lock.
361 	 */
362 	if (TAILQ_NEXT(dh, dh_list) != NULL) {
363 		DIRHASHLIST_LOCK();
364 		DIRHASH_LOCK(dh);
365 		/*
366 		 * If the new score will be greater than that of the next
367 		 * entry, then move this entry past it. With both mutexes
368 		 * held, dh_next won't go away, but its dh_score could
369 		 * change; that's not important since it is just a hint.
370 		 */
371 		if (dh->dh_hash != NULL &&
372 		    (dh_next = TAILQ_NEXT(dh, dh_list)) != NULL &&
373 		    dh->dh_score >= dh_next->dh_score) {
374 			KASSERT(dh->dh_onlist);
375 			TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
376 			TAILQ_INSERT_AFTER(&ulfsdirhash_list, dh_next, dh,
377 			    dh_list);
378 		}
379 		DIRHASHLIST_UNLOCK();
380 	} else {
381 		/* Already the last, though that could change as we wait. */
382 		DIRHASH_LOCK(dh);
383 	}
384 	if (dh->dh_hash == NULL) {
385 		DIRHASH_UNLOCK(dh);
386 		ulfsdirhash_free(ip);
387 		return (EJUSTRETURN);
388 	}
389 
390 	/* Update the score. */
391 	if (dh->dh_score < DH_SCOREMAX)
392 		dh->dh_score++;
393 
394 	vp = ip->i_vnode;
395 	bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
396 	blkoff = -1;
397 	bp = NULL;
398 restart:
399 	slot = ulfsdirhash_hash(dh, name, namelen);
400 
401 	if (dh->dh_seqopt) {
402 		/*
403 		 * Sequential access optimisation. dh_seqoff contains the
404 		 * offset of the directory entry immediately following
405 		 * the last entry that was looked up. Check if this offset
406 		 * appears in the hash chain for the name we are looking for.
407 		 */
408 		for (i = slot; (offset = DH_ENTRY(dh, i)) != DIRHASH_EMPTY;
409 		    i = WRAPINCR(i, dh->dh_hlen))
410 			if (offset == dh->dh_seqoff)
411 				break;
412 		if (offset == dh->dh_seqoff) {
413 			/*
414 			 * We found an entry with the expected offset. This
415 			 * is probably the entry we want, but if not, the
416 			 * code below will turn off seqoff and retry.
417 			 */
418 			slot = i;
419 		} else
420 			dh->dh_seqopt = 0;
421 	}
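
	/*
	 * Example of the optimisation above: if the previous lookup matched
	 * an entry at offset 512 whose LFS_DIRSIZ was 24, dh_seqoff is now
	 * 536; when a later lookup finds 536 in its hash chain it starts
	 * probing there, which pays off for applications that process
	 * directories in order.
	 */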
422 
423 	for (; (offset = DH_ENTRY(dh, slot)) != DIRHASH_EMPTY;
424 	    slot = WRAPINCR(slot, dh->dh_hlen)) {
425 		if (offset == DIRHASH_DEL)
426 			continue;
427 
428 		if (offset < 0 || offset >= ip->i_size)
429 			panic("ulfsdirhash_lookup: bad offset in hash array");
430 		if ((offset & ~bmask) != blkoff) {
431 			if (bp != NULL)
432 				brelse(bp, 0);
433 			blkoff = offset & ~bmask;
434 			if (ulfs_blkatoff(vp, (off_t)blkoff,
435 			    NULL, &bp, false) != 0) {
436 				DIRHASH_UNLOCK(dh);
437 				return (EJUSTRETURN);
438 			}
439 		}
440 		dp = (LFS_DIRHEADER *)((char *)bp->b_data + (offset & bmask));
441 		if (lfs_dir_getreclen(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) >
442 		    dirblksiz - (offset & (dirblksiz - 1))) {
443 			/* Corrupted directory. */
444 			DIRHASH_UNLOCK(dh);
445 			brelse(bp, 0);
446 			return (EJUSTRETURN);
447 		}
448 		if (lfs_dir_getnamlen(fs, dp) == namelen &&
449 		    memcmp(lfs_dir_nameptr(fs, dp), name, namelen) == 0) {
450 			/* Found. Get the prev offset if needed. */
451 			if (prevoffp != NULL) {
452 				if (offset & (dirblksiz - 1)) {
453 					prevoff = ulfsdirhash_getprev(fs, dp,
454 					    offset, dirblksiz);
455 					if (prevoff == -1) {
456 						brelse(bp, 0);
457 						return (EJUSTRETURN);
458 					}
459 				} else
460 					prevoff = offset;
461 				*prevoffp = prevoff;
462 			}
463 
464 			/* Check for sequential access, and update offset. */
465 			if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset)
466 				dh->dh_seqopt = 1;
467 			dh->dh_seqoff = offset + LFS_DIRSIZ(fs, dp);
468 			DIRHASH_UNLOCK(dh);
469 
470 			*bpp = bp;
471 			*offp = offset;
472 			return (0);
473 		}
474 
475 		if (dh->dh_hash == NULL) {
476 			DIRHASH_UNLOCK(dh);
477 			if (bp != NULL)
478 				brelse(bp, 0);
479 			ulfsdirhash_free(ip);
480 			return (EJUSTRETURN);
481 		}
482 		/*
483 		 * When the name doesn't match in the seqopt case, go back
484 		 * and search normally.
485 		 */
486 		if (dh->dh_seqopt) {
487 			dh->dh_seqopt = 0;
488 			goto restart;
489 		}
490 	}
491 	DIRHASH_UNLOCK(dh);
492 	if (bp != NULL)
493 		brelse(bp, 0);
494 	return (ENOENT);
495 }
496 
497 /*
498  * Find a directory block with room for 'slotneeded' bytes. Returns
499  * the offset of the directory entry that begins the free space.
500  * This will either be the offset of an existing entry that has free
501  * space at the end, or the offset of an entry with d_ino == 0 at
502  * the start of a DIRBLKSIZ block.
503  *
504  * To use the space, the caller may need to compact existing entries in
505  * the directory. The total number of bytes in all of the entries involved
506  * in the compaction is stored in *slotsize. In other words, all of
507  * the entries that must be compacted are exactly contained in the
508  * region beginning at the returned offset and spanning *slotsize bytes.
509  *
510  * Returns -1 if no space was found, indicating that the directory
511  * must be extended.
512  */
513 doff_t
514 ulfsdirhash_findfree(struct inode *ip, int slotneeded, int *slotsize)
515 {
516 	struct lfs *fs = ip->i_lfs;
517 	LFS_DIRHEADER *dp;
518 	struct dirhash *dh;
519 	struct buf *bp;
520 	doff_t pos, slotstart;
521 	int dirblock, error, freebytes, i;
522 	int dirblksiz = ip->i_lfs->um_dirblksiz;
523 
524 	if ((dh = ip->i_dirhash) == NULL)
525 		return (-1);
526 
527 	DIRHASH_LOCK(dh);
528 	if (dh->dh_hash == NULL) {
529 		DIRHASH_UNLOCK(dh);
530 		ulfsdirhash_free(ip);
531 		return (-1);
532 	}
533 
534 	/* Find a directory block with the desired free space. */
535 	dirblock = -1;
536 	for (i = howmany(slotneeded, DIRALIGN); i <= DH_NFSTATS; i++)
537 		if ((dirblock = dh->dh_firstfree[i]) != -1)
538 			break;
539 	if (dirblock == -1) {
540 		DIRHASH_UNLOCK(dh);
541 		return (-1);
542 	}
543 
544 	KASSERT(dirblock < dh->dh_nblk &&
545 	    dh->dh_blkfree[dirblock] >= howmany(slotneeded, DIRALIGN));
546 	pos = dirblock * dirblksiz;
547 	error = ulfs_blkatoff(ip->i_vnode, (off_t)pos, (void *)&dp, &bp, false);
548 	if (error) {
549 		DIRHASH_UNLOCK(dh);
550 		return (-1);
551 	}
552 	/* Find the first entry with free space. */
553 	for (i = 0; i < dirblksiz; ) {
554 		if (lfs_dir_getreclen(fs, dp) == 0) {
555 			DIRHASH_UNLOCK(dh);
556 			brelse(bp, 0);
557 			return (-1);
558 		}
559 		if (lfs_dir_getino(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) > LFS_DIRSIZ(fs, dp))
560 			break;
561 		i += lfs_dir_getreclen(fs, dp);
562 		dp = LFS_NEXTDIR(fs, dp);
563 	}
564 	if (i > dirblksiz) {
565 		DIRHASH_UNLOCK(dh);
566 		brelse(bp, 0);
567 		return (-1);
568 	}
569 	slotstart = pos + i;
570 
571 	/* Find the range of entries needed to get enough space */
572 	freebytes = 0;
573 	while (i < dirblksiz && freebytes < slotneeded) {
574 		freebytes += lfs_dir_getreclen(fs, dp);
575 		if (lfs_dir_getino(fs, dp) != 0)
576 			freebytes -= LFS_DIRSIZ(fs, dp);
577 		if (lfs_dir_getreclen(fs, dp) == 0) {
578 			DIRHASH_UNLOCK(dh);
579 			brelse(bp, 0);
580 			return (-1);
581 		}
582 		i += lfs_dir_getreclen(fs, dp);
583 		dp = LFS_NEXTDIR(fs, dp);
584 	}
585 	if (i > dirblksiz) {
586 		DIRHASH_UNLOCK(dh);
587 		brelse(bp, 0);
588 		return (-1);
589 	}
590 	if (freebytes < slotneeded)
591 		panic("ulfsdirhash_findfree: free mismatch");
592 	DIRHASH_UNLOCK(dh);
593 	brelse(bp, 0);
594 	*slotsize = pos + i - slotstart;
595 	return (slotstart);
596 }
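
/*
 * Usage note (illustrative): on success the caller compacts the existing
 * entries in [slotstart, slotstart + *slotsize) towards the front of that
 * range, leaving at least 'slotneeded' contiguous free bytes for the new
 * entry; on -1 the directory has to be extended by a new block instead.
 */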
597 
598 /*
599  * Return the start of the unused space at the end of a directory, or
600  * -1 if there are no trailing unused blocks.
601  */
602 doff_t
603 ulfsdirhash_enduseful(struct inode *ip)
604 {
605 	struct dirhash *dh;
606 	int i;
607 	int dirblksiz = ip->i_lfs->um_dirblksiz;
608 
609 	if ((dh = ip->i_dirhash) == NULL)
610 		return (-1);
611 
612 	DIRHASH_LOCK(dh);
613 	if (dh->dh_hash == NULL) {
614 		DIRHASH_UNLOCK(dh);
615 		ulfsdirhash_free(ip);
616 		return (-1);
617 	}
618 
619 	if (dh->dh_blkfree[dh->dh_dirblks - 1] != dirblksiz / DIRALIGN) {
620 		DIRHASH_UNLOCK(dh);
621 		return (-1);
622 	}
623 
624 	for (i = dh->dh_dirblks - 1; i >= 0; i--)
625 		if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
626 			break;
627 	DIRHASH_UNLOCK(dh);
628 	return ((doff_t)(i + 1) * dirblksiz);
629 }
630 
631 /*
632  * Insert information into the hash about a new directory entry. dirp
633  * points to the LFS_DIRHEADER for the entry, and offset specifies
634  * the offset of this entry.
635  */
636 void
637 ulfsdirhash_add(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
638 {
639 	struct lfs *fs = ip->i_lfs;
640 	struct dirhash *dh;
641 	int slot;
642 	int dirblksiz = ip->i_lfs->um_dirblksiz;
643 
644 	if ((dh = ip->i_dirhash) == NULL)
645 		return;
646 
647 	DIRHASH_LOCK(dh);
648 	if (dh->dh_hash == NULL) {
649 		DIRHASH_UNLOCK(dh);
650 		ulfsdirhash_free(ip);
651 		return;
652 	}
653 
654 	KASSERT(offset < dh->dh_dirblks * dirblksiz);
655 	/*
656 	 * Normal hash usage is < 66% (the table has 50% headroom). If usage
657 	 * gets too high, remove the hash entirely and let it be rebuilt later.
658 	 */
659 	if (dh->dh_hused >= (dh->dh_hlen * 3) / 4) {
660 		DIRHASH_UNLOCK(dh);
661 		ulfsdirhash_free(ip);
662 		return;
663 	}
664 
665 	/* Find a free hash slot (empty or deleted), and add the entry. */
666 	slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, dirp),
667 				lfs_dir_getnamlen(fs, dirp));
668 	while (DH_ENTRY(dh, slot) >= 0)
669 		slot = WRAPINCR(slot, dh->dh_hlen);
670 	if (DH_ENTRY(dh, slot) == DIRHASH_EMPTY)
671 		dh->dh_hused++;
672 	DH_ENTRY(dh, slot) = offset;
673 
674 	/* Update the per-block summary info. */
675 	ulfsdirhash_adjfree(dh, offset, -LFS_DIRSIZ(fs, dirp), dirblksiz);
676 	DIRHASH_UNLOCK(dh);
677 }
678 
679 /*
680  * Remove the specified directory entry from the hash. The entry to remove
681  * is defined by the name in `dirp', which must exist at the specified
682  * `offset' within the directory.
683  */
684 void
685 ulfsdirhash_remove(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
686 {
687 	struct lfs *fs = ip->i_lfs;
688 	struct dirhash *dh;
689 	int slot;
690 	int dirblksiz = ip->i_lfs->um_dirblksiz;
691 
692 	if ((dh = ip->i_dirhash) == NULL)
693 		return;
694 
695 	DIRHASH_LOCK(dh);
696 	if (dh->dh_hash == NULL) {
697 		DIRHASH_UNLOCK(dh);
698 		ulfsdirhash_free(ip);
699 		return;
700 	}
701 
702 	KASSERT(offset < dh->dh_dirblks * dirblksiz);
703 	/* Find the entry */
704 	slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
705 				    lfs_dir_getnamlen(fs, dirp), offset);
706 
707 	/* Remove the hash entry. */
708 	ulfsdirhash_delslot(dh, slot);
709 
710 	/* Update the per-block summary info. */
711 	ulfsdirhash_adjfree(dh, offset, LFS_DIRSIZ(fs, dirp), dirblksiz);
712 	DIRHASH_UNLOCK(dh);
713 }
714 
715 /*
716  * Change the offset associated with a directory entry in the hash. Used
717  * when compacting directory blocks.
718  */
719 void
720 ulfsdirhash_move(struct inode *ip, LFS_DIRHEADER *dirp, doff_t oldoff,
721     doff_t newoff)
722 {
723 	struct lfs *fs = ip->i_lfs;
724 	struct dirhash *dh;
725 	int slot;
726 
727 	if ((dh = ip->i_dirhash) == NULL)
728 		return;
729 	DIRHASH_LOCK(dh);
730 	if (dh->dh_hash == NULL) {
731 		DIRHASH_UNLOCK(dh);
732 		ulfsdirhash_free(ip);
733 		return;
734 	}
735 
736 	KASSERT(oldoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz &&
737 	    newoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz);
738 	/* Find the entry, and update the offset. */
739 	slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
740 				    lfs_dir_getnamlen(fs, dirp), oldoff);
741 	DH_ENTRY(dh, slot) = newoff;
742 	DIRHASH_UNLOCK(dh);
743 }
744 
745 /*
746  * Inform dirhash that the directory has grown by one block that
747  * begins at offset (i.e. the new length is offset + DIRBLKSIZ).
748  */
749 void
750 ulfsdirhash_newblk(struct inode *ip, doff_t offset)
751 {
752 	struct dirhash *dh;
753 	int block;
754 	int dirblksiz = ip->i_lfs->um_dirblksiz;
755 
756 	if ((dh = ip->i_dirhash) == NULL)
757 		return;
758 	DIRHASH_LOCK(dh);
759 	if (dh->dh_hash == NULL) {
760 		DIRHASH_UNLOCK(dh);
761 		ulfsdirhash_free(ip);
762 		return;
763 	}
764 
765 	KASSERT(offset == dh->dh_dirblks * dirblksiz);
766 	block = offset / dirblksiz;
767 	if (block >= dh->dh_nblk) {
768 		/* Out of space; must rebuild. */
769 		DIRHASH_UNLOCK(dh);
770 		ulfsdirhash_free(ip);
771 		return;
772 	}
773 	dh->dh_dirblks = block + 1;
774 
775 	/* Account for the new free block. */
776 	dh->dh_blkfree[block] = dirblksiz / DIRALIGN;
777 	if (dh->dh_firstfree[DH_NFSTATS] == -1)
778 		dh->dh_firstfree[DH_NFSTATS] = block;
779 	DIRHASH_UNLOCK(dh);
780 }
781 
782 /*
783  * Inform dirhash that the directory is being truncated.
784  */
785 void
786 ulfsdirhash_dirtrunc(struct inode *ip, doff_t offset)
787 {
788 	struct dirhash *dh;
789 	int block, i;
790 	int dirblksiz = ip->i_lfs->um_dirblksiz;
791 
792 	if ((dh = ip->i_dirhash) == NULL)
793 		return;
794 
795 	DIRHASH_LOCK(dh);
796 	if (dh->dh_hash == NULL) {
797 		DIRHASH_UNLOCK(dh);
798 		ulfsdirhash_free(ip);
799 		return;
800 	}
801 
802 	KASSERT(offset <= dh->dh_dirblks * dirblksiz);
803 	block = howmany(offset, dirblksiz);
804 	/*
805 	 * If the directory shrinks to less than 1/8 of dh_nblk blocks
806 	 * (about 20% of its original size due to the 50% extra added in
807 	 * ulfsdirhash_build) then free it, and let the caller rebuild
808 	 * if necessary.
809 	 */
810 	if (block < dh->dh_nblk / 8 && dh->dh_narrays > 1) {
811 		DIRHASH_UNLOCK(dh);
812 		ulfsdirhash_free(ip);
813 		return;
814 	}
815 
816 	/*
817 	 * Remove any `first free' information pertaining to the
818 	 * truncated blocks. All blocks we're removing should be
819 	 * completely unused.
820 	 */
821 	if (dh->dh_firstfree[DH_NFSTATS] >= block)
822 		dh->dh_firstfree[DH_NFSTATS] = -1;
823 	for (i = block; i < dh->dh_dirblks; i++)
824 		if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
825 			panic("ulfsdirhash_dirtrunc: blocks in use");
826 	for (i = 0; i < DH_NFSTATS; i++)
827 		if (dh->dh_firstfree[i] >= block)
828 			panic("ulfsdirhash_dirtrunc: first free corrupt");
829 	dh->dh_dirblks = block;
830 	DIRHASH_UNLOCK(dh);
831 }
832 
833 /*
834  * Debugging function to check that the dirhash information about
835  * a directory block matches its actual contents. Panics if a mismatch
836  * is detected.
837  *
838  * On entry, `sbuf' should point to the start of an in-core
839  * DIRBLKSIZ-sized directory block, and `offset' should contain the
840  * offset from the start of the directory of that block.
841  */
842 void
843 ulfsdirhash_checkblock(struct inode *ip, char *sbuf, doff_t offset)
844 {
845 	struct lfs *fs = ip->i_lfs;
846 	struct dirhash *dh;
847 	LFS_DIRHEADER *dp;
848 	int block, ffslot, i, nfree;
849 	int dirblksiz = ip->i_lfs->um_dirblksiz;
850 
851 	if (!ulfs_dirhashcheck)
852 		return;
853 	if ((dh = ip->i_dirhash) == NULL)
854 		return;
855 
856 	DIRHASH_LOCK(dh);
857 	if (dh->dh_hash == NULL) {
858 		DIRHASH_UNLOCK(dh);
859 		ulfsdirhash_free(ip);
860 		return;
861 	}
862 
863 	block = offset / dirblksiz;
864 	if ((offset & (dirblksiz - 1)) != 0 || block >= dh->dh_dirblks)
865 		panic("ulfsdirhash_checkblock: bad offset");
866 
867 	nfree = 0;
868 	for (i = 0; i < dirblksiz; i += lfs_dir_getreclen(fs, dp)) {
869 		dp = (LFS_DIRHEADER *)(sbuf + i);
870 		if (lfs_dir_getreclen(fs, dp) == 0 || i + lfs_dir_getreclen(fs, dp) > dirblksiz)
871 			panic("ulfsdirhash_checkblock: bad dir");
872 
873 		if (lfs_dir_getino(fs, dp) == 0) {
874 #if 0
875 			/*
876 			 * XXX entries with d_ino == 0 should only occur
877 			 * at the start of a DIRBLKSIZ block. However the
878 			 * ulfs code is tolerant of such entries at other
879 			 * offsets, and fsck does not fix them.
880 			 */
881 			if (i != 0)
882 				panic("ulfsdirhash_checkblock: bad dir inode");
883 #endif
884 			nfree += lfs_dir_getreclen(fs, dp);
885 			continue;
886 		}
887 
888 		/* Check that the entry exists (will panic if it doesn't). */
889 		ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dp),
890 				     lfs_dir_getnamlen(fs, dp),
891 				     offset + i);
892 
893 		nfree += lfs_dir_getreclen(fs, dp) - LFS_DIRSIZ(fs, dp);
894 	}
895 	if (i != dirblksiz)
896 		panic("ulfsdirhash_checkblock: bad dir end");
897 
898 	if (dh->dh_blkfree[block] * DIRALIGN != nfree)
899 		panic("ulfsdirhash_checkblock: bad free count");
900 
901 	ffslot = BLKFREE2IDX(nfree / DIRALIGN);
902 	for (i = 0; i <= DH_NFSTATS; i++)
903 		if (dh->dh_firstfree[i] == block && i != ffslot)
904 			panic("ulfsdirhash_checkblock: bad first-free");
905 	if (dh->dh_firstfree[ffslot] == -1)
906 		panic("ulfsdirhash_checkblock: missing first-free entry");
907 	DIRHASH_UNLOCK(dh);
908 }
909 
910 /*
911  * Hash the specified filename into a dirhash slot.
912  */
913 static int
914 ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen)
915 {
916 	u_int32_t hash;
917 
918 	/*
919 	 * We hash the name and then some other bit of data that is
920 	 * invariant over the dirhash's lifetime. Otherwise names
921 	 * differing only in the last byte are placed close to one
922 	 * another in the table, which is bad for linear probing.
923 	 */
924 	hash = hash32_buf(name, namelen, HASH32_BUF_INIT);
925 	hash = hash32_buf(&dh, sizeof(dh), hash);
926 	return (hash % dh->dh_hlen);
927 }
928 
929 /*
930  * Adjust the number of free bytes in the block containing `offset'
931  * by the value specified by `diff'.
932  *
933  * The caller must ensure we have exclusive access to `dh'; normally
934  * that means that dh_lock should be held, but this is also called
935  * from ulfsdirhash_build() where exclusive access can be assumed.
936  */
937 static void
938 ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff, int dirblksiz)
939 {
940 	int block, i, nfidx, ofidx;
941 
942 	KASSERT(mutex_owned(&dh->dh_lock));
943 
944 	/* Update the per-block summary info. */
945 	block = offset / dirblksiz;
946 	KASSERT(block < dh->dh_nblk && block < dh->dh_dirblks);
947 	ofidx = BLKFREE2IDX(dh->dh_blkfree[block]);
948 	dh->dh_blkfree[block] = (int)dh->dh_blkfree[block] + (diff / DIRALIGN);
949 	nfidx = BLKFREE2IDX(dh->dh_blkfree[block]);
950 
951 	/* Update the `first free' list if necessary. */
952 	if (ofidx != nfidx) {
953 		/* If removing, scan forward for the next block. */
954 		if (dh->dh_firstfree[ofidx] == block) {
955 			for (i = block + 1; i < dh->dh_dirblks; i++)
956 				if (BLKFREE2IDX(dh->dh_blkfree[i]) == ofidx)
957 					break;
958 			dh->dh_firstfree[ofidx] = (i < dh->dh_dirblks) ? i : -1;
959 		}
960 
961 		/* Make this the new `first free' if necessary */
962 		if (dh->dh_firstfree[nfidx] > block ||
963 		    dh->dh_firstfree[nfidx] == -1)
964 			dh->dh_firstfree[nfidx] = block;
965 	}
966 }
967 
968 /*
969  * Find the specified name which should have the specified offset.
970  * Returns a slot number, and panics on failure.
971  *
972  * `dh' must be locked on entry and remains so on return.
973  */
974 static int
975 ulfsdirhash_findslot(struct dirhash *dh, const char *name, int namelen,
976     doff_t offset)
977 {
978 	int slot;
979 
980 	KASSERT(mutex_owned(&dh->dh_lock));
981 
982 	/* Find the entry. */
983 	KASSERT(dh->dh_hused < dh->dh_hlen);
984 	slot = ulfsdirhash_hash(dh, name, namelen);
985 	while (DH_ENTRY(dh, slot) != offset &&
986 	    DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
987 		slot = WRAPINCR(slot, dh->dh_hlen);
988 	if (DH_ENTRY(dh, slot) != offset)
989 		panic("ulfsdirhash_findslot: '%.*s' not found", namelen, name);
990 
991 	return (slot);
992 }
993 
994 /*
995  * Remove the entry corresponding to the specified slot from the hash array.
996  *
997  * `dh' must be locked on entry and remains so on return.
998  */
999 static void
1000 ulfsdirhash_delslot(struct dirhash *dh, int slot)
1001 {
1002 	int i;
1003 
1004 	KASSERT(mutex_owned(&dh->dh_lock));
1005 
1006 	/* Mark the entry as deleted. */
1007 	DH_ENTRY(dh, slot) = DIRHASH_DEL;
1008 
1009 	/* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
1010 	for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
1011 		i = WRAPINCR(i, dh->dh_hlen);
1012 	if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
1013 		i = WRAPDECR(i, dh->dh_hlen);
1014 		while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
1015 			DH_ENTRY(dh, i) = DIRHASH_EMPTY;
1016 			dh->dh_hused--;
1017 			i = WRAPDECR(i, dh->dh_hlen);
1018 		}
1019 		KASSERT(dh->dh_hused >= 0);
1020 	}
1021 }
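
/*
 * Example of the DIRHASH_DEL cleanup above: if consecutive probe slots
 * hold [A, DEL, B, DEL, EMPTY] and B is removed, the forward scan from
 * B's slot reaches the EMPTY slot, so the backward pass converts all
 * three DEL slots to EMPTY (decrementing dh_hused for each).  If the
 * forward scan had ended at an occupied slot instead, the DEL markers
 * would be kept so that later probes still walk past them.
 */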
1022 
1023 /*
1024  * Given a directory entry and its offset, find the offset of the
1025  * previous entry in the same DIRBLKSIZ-sized block. Returns an
1026  * offset, or -1 if there is no previous entry in the block or some
1027  * other problem occurred.
1028  */
1029 static doff_t
1030 ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dirp,
1031 		doff_t offset, int dirblksiz)
1032 {
1033 	LFS_DIRHEADER *dp;
1034 	char *blkbuf;
1035 	doff_t blkoff, prevoff;
1036 	int entrypos, i;
1037 	unsigned reclen;
1038 
1039 	blkoff = offset & ~(dirblksiz - 1);	/* offset of start of block */
1040 	entrypos = offset & (dirblksiz - 1);	/* entry relative to block */
1041 	blkbuf = (char *)dirp - entrypos;
1042 	prevoff = blkoff;
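
	/*
	 * Example with dirblksiz == 512: offset 1234 gives blkoff == 1024
	 * and entrypos == 210, so the scan below starts at the block
	 * boundary and walks entries until it reaches byte 210 of the block.
	 */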
1043 
1044 	/* If `offset' is the start of a block, there is no previous entry. */
1045 	if (entrypos == 0)
1046 		return (-1);
1047 
1048 	/* Scan from the start of the block until we get to the entry. */
1049 	for (i = 0; i < entrypos; i += reclen) {
1050 		dp = (LFS_DIRHEADER *)(blkbuf + i);
1051 		reclen = lfs_dir_getreclen(fs, dp);
1052 		if (reclen == 0 || i + reclen > entrypos)
1053 			return (-1);	/* Corrupted directory. */
1054 		prevoff = blkoff + i;
1055 	}
1056 	return (prevoff);
1057 }
1058 
1059 /*
1060  * Try to free up `wanted' bytes by stealing memory from existing
1061  * dirhashes. Returns zero with list locked if successful.
1062  */
1063 static int
1064 ulfsdirhash_recycle(int wanted)
1065 {
1066 	struct dirhash *dh;
1067 	doff_t **hash;
1068 	u_int8_t *blkfree;
1069 	int i, mem, narrays;
1070 	size_t hashsz, blkfreesz;
1071 
1072 	DIRHASHLIST_LOCK();
1073 	while (wanted + ulfs_dirhashmem > ulfs_dirhashmaxmem) {
1074 		/* Find a dirhash, and lock it. */
1075 		if ((dh = TAILQ_FIRST(&ulfsdirhash_list)) == NULL) {
1076 			DIRHASHLIST_UNLOCK();
1077 			return (-1);
1078 		}
1079 		DIRHASH_LOCK(dh);
1080 		KASSERT(dh->dh_hash != NULL);
1081 
1082 		/* Decrement the score; only recycle if it becomes zero. */
1083 		if (--dh->dh_score > 0) {
1084 			DIRHASH_UNLOCK(dh);
1085 			DIRHASHLIST_UNLOCK();
1086 			return (-1);
1087 		}
1088 
1089 		/* Remove it from the list and detach its memory. */
1090 		TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
1091 		dh->dh_onlist = 0;
1092 		hash = dh->dh_hash;
1093 		hashsz = dh->dh_hashsz;
1094 		dh->dh_hash = NULL;
1095 		blkfree = dh->dh_blkfree;
1096 		blkfreesz = dh->dh_blkfreesz;
1097 		dh->dh_blkfree = NULL;
1098 		narrays = dh->dh_narrays;
1099 		mem = narrays * sizeof(*dh->dh_hash) +
1100 		    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
1101 		    dh->dh_nblk * sizeof(*dh->dh_blkfree);
1102 
1103 		/* Unlock everything, free the detached memory. */
1104 		DIRHASH_UNLOCK(dh);
1105 		DIRHASHLIST_UNLOCK();
1106 
1107 		for (i = 0; i < narrays; i++)
1108 			DIRHASH_BLKFREE(hash[i]);
1109 		kmem_free(hash, hashsz);
1110 		kmem_free(blkfree, blkfreesz);
1111 
1112 		/* Account for the returned memory, and repeat if necessary. */
1113 		DIRHASHLIST_LOCK();
1114 		atomic_add_int(&ulfs_dirhashmem, -mem);
1115 	}
1116 	/* Success. */
1117 	return (0);
1118 }
1119 
1120 static void
1121 ulfsdirhash_sysctl_init(void)
1122 {
1123 	const struct sysctlnode *rnode, *cnode;
1124 
1125 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, NULL, &rnode,
1126 		       CTLFLAG_PERMANENT,
1127 		       CTLTYPE_NODE, "ulfs",
1128 		       SYSCTL_DESCR("ulfs"),
1129 		       NULL, 0, NULL, 0,
1130 		       CTL_VFS, CTL_CREATE, CTL_EOL);
1131 
1132 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &rnode,
1133 		       CTLFLAG_PERMANENT,
1134 		       CTLTYPE_NODE, "dirhash",
1135 		       SYSCTL_DESCR("dirhash"),
1136 		       NULL, 0, NULL, 0,
1137 		       CTL_CREATE, CTL_EOL);
1138 
1139 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
1140 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1141 		       CTLTYPE_INT, "minblocks",
1142 		       SYSCTL_DESCR("minimum hashed directory size in blocks"),
1143 		       NULL, 0, &ulfs_dirhashminblks, 0,
1144 		       CTL_CREATE, CTL_EOL);
1145 
1146 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
1147 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1148 		       CTLTYPE_INT, "maxmem",
1149 		       SYSCTL_DESCR("maximum dirhash memory usage"),
1150 		       NULL, 0, &ulfs_dirhashmaxmem, 0,
1151 		       CTL_CREATE, CTL_EOL);
1152 
1153 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
1154 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1155 		       CTLTYPE_INT, "memused",
1156 		       SYSCTL_DESCR("current dirhash memory usage"),
1157 		       NULL, 0, &ulfs_dirhashmem, 0,
1158 		       CTL_CREATE, CTL_EOL);
1159 
1160 	sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
1161 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1162 		       CTLTYPE_INT, "docheck",
1163 		       SYSCTL_DESCR("enable extra sanity checks"),
1164 		       NULL, 0, &ulfs_dirhashcheck, 0,
1165 		       CTL_CREATE, CTL_EOL);
1166 }
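
/*
 * The nodes created above are reachable as vfs.ulfs.dirhash.minblocks,
 * .maxmem, .memused and .docheck; the "ulfs" and "dirhash" nodes use
 * CTL_CREATE, so their numeric IDs are assigned dynamically.
 */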
1167 
1168 void
1169 ulfsdirhash_init(void)
1170 {
1171 
1172 	/*
1173 	 * Only initialise defaults for the dirhash size if it hasn't
1174 	 * been set.
1175 	 */
1176 	if (ulfs_dirhashmaxmem == 0) {
1177 		/* Use 64-bit math to avoid overflows. */
1178 		uint64_t physmem_bytes, hash_bytes;
1179 
1180 		physmem_bytes = ctob((uint64_t)physmem);
1181 		hash_bytes = physmem_bytes / DIRHASH_DEFAULT_DIVIDER;
1182 
1183 		if (hash_bytes < MIN_DEFAULT_DIRHASH_MEM)
1184 			hash_bytes = 0;
1185 
1186 		if (hash_bytes > MAX_DEFAULT_DIRHASH_MEM)
1187 			hash_bytes = MAX_DEFAULT_DIRHASH_MEM;
1188 
1189 		ulfs_dirhashmaxmem = (u_int)hash_bytes;
1190 	}
1191 
1192 	mutex_init(&ulfsdirhash_lock, MUTEX_DEFAULT, IPL_NONE);
1193 	ulfsdirhashblk_cache = pool_cache_init(DH_NBLKOFF * sizeof(daddr_t), 0,
1194 	    0, 0, "dirhashblk", NULL, IPL_NONE, NULL, NULL, NULL);
1195 	ulfsdirhash_cache = pool_cache_init(sizeof(struct dirhash), 0,
1196 	    0, 0, "dirhash", NULL, IPL_NONE, NULL, NULL, NULL);
1197 	TAILQ_INIT(&ulfsdirhash_list);
1198 	ulfsdirhash_sysctl_init();
1199 }
1200 
1201 void
1202 ulfsdirhash_done(void)
1203 {
1204 
1205 	KASSERT(TAILQ_EMPTY(&ulfsdirhash_list));
1206 	pool_cache_destroy(ulfsdirhashblk_cache);
1207 	pool_cache_destroy(ulfsdirhash_cache);
1208 	mutex_destroy(&ulfsdirhash_lock);
1209 	sysctl_teardown(&ulfsdirhash_sysctl_log);
1210 }
1211