/*	$NetBSD: ulfs_dirhash.c,v 1.18 2020/03/14 18:08:40 ad Exp $	*/
/* from NetBSD: ufs_dirhash.c,v 1.37 2014/12/20 00:28:05 christos Exp */

/*
 * Copyright (c) 2001, 2002 Ian Dowse.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/ufs/ufs/ufs_dirhash.c,v 1.3.2.8 2004/12/08 11:54:13 dwmalone Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_dirhash.c,v 1.18 2020/03/14 18:08:40 ad Exp $");

/*
 * This implements a hash-based lookup scheme for ULFS directories.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/hash.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/atomic.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfs_dirhash.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>

#define WRAPINCR(val, limit)    (((val) + 1 == (limit)) ? 0 : ((val) + 1))
#define WRAPDECR(val, limit)    (((val) == 0) ? ((limit) - 1) : ((val) - 1))
#define OFSFMT(ip)              ((ip)->i_lfs->um_maxsymlinklen <= 0)
#define BLKFREE2IDX(n)          ((n) > DH_NFSTATS ? DH_NFSTATS : (n))
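
/*
 * Illustrative note (added commentary, not from the original source):
 * with limit == 4, WRAPINCR steps 0 -> 1 -> 2 -> 3 -> 0 and WRAPDECR
 * steps 0 -> 3 -> 2 -> 1 -> 0.  This circular stepping is what the
 * linear-probe loops below rely on when walking a hash chain, e.g.
 *
 *      slot = WRAPINCR(slot, dh->dh_hlen);
 *
 * BLKFREE2IDX() clamps a block's free-space count (in DIRALIGN units)
 * to DH_NFSTATS, the index of the last dh_firstfree[] summary bucket.
 */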

static u_int ulfs_dirhashminblks = 5;
static u_int ulfs_dirhashmaxmem = 2 * 1024 * 1024;
static u_int ulfs_dirhashmem;
static u_int ulfs_dirhashcheck = 0;

static int ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen);
static void ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff,
    int dirblksiz);
static void ulfsdirhash_delslot(struct dirhash *dh, int slot);
static int ulfsdirhash_findslot(struct dirhash *dh, const char *name,
    int namelen, doff_t offset);
static doff_t ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dp,
    doff_t offset, int dirblksiz);
static int ulfsdirhash_recycle(int wanted);

static pool_cache_t ulfsdirhashblk_cache;
static pool_cache_t ulfsdirhash_cache;

#define DIRHASHLIST_LOCK()      mutex_enter(&ulfsdirhash_lock)
#define DIRHASHLIST_UNLOCK()    mutex_exit(&ulfsdirhash_lock)
#define DIRHASH_LOCK(dh)        mutex_enter(&(dh)->dh_lock)
#define DIRHASH_UNLOCK(dh)      mutex_exit(&(dh)->dh_lock)
#define DIRHASH_BLKALLOC()      \
    pool_cache_get(ulfsdirhashblk_cache, PR_NOWAIT)
#define DIRHASH_BLKFREE(ptr)    \
    pool_cache_put(ulfsdirhashblk_cache, ptr)

/* Dirhash list; recently-used entries are near the tail. */
static TAILQ_HEAD(, dirhash) ulfsdirhash_list;

/* Protects: ulfsdirhash_list, `dh_list' field, ulfs_dirhashmem. */
static kmutex_t ulfsdirhash_lock;

static struct sysctllog *ulfsdirhash_sysctl_log;

/*
 * Locking order:
 *      ulfsdirhash_lock
 *      dh_lock
 *
 * The dh_lock mutex should be acquired either via the inode lock, or via
 * ulfsdirhash_lock. Only the owner of the inode may free the associated
 * dirhash, but anything can steal its memory and set dh_hash to NULL.
 */
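
/*
 * Summary sketch (added commentary, not a new rule): code that reaches
 * a dirhash through ip->i_dirhash while holding the inode lock takes
 * dh_lock alone; code that walks or reorders ulfsdirhash_list, such as
 * ulfsdirhash_lookup() and ulfsdirhash_recycle(), takes
 * ulfsdirhash_lock first and only then dh_lock, never the reverse.
 */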

/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ulfsdirhash_build(struct inode *ip)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh;
        struct buf *bp = NULL;
        LFS_DIRHEADER *ep;
        struct vnode *vp;
        doff_t bmask, pos;
        int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        /* Check if we can/should use dirhash. */
        if (ip->i_dirhash == NULL) {
                if (ip->i_size < (ulfs_dirhashminblks * dirblksiz) || OFSFMT(ip))
                        return (-1);
        } else {
                /* Hash exists, but sysctls could have changed. */
                if (ip->i_size < (ulfs_dirhashminblks * dirblksiz) ||
                    ulfs_dirhashmem > ulfs_dirhashmaxmem) {
                        ulfsdirhash_free(ip);
                        return (-1);
                }
                /* Check if hash exists and is intact (note: unlocked read). */
                if (ip->i_dirhash->dh_hash != NULL)
                        return (0);
                /* Free the old, recycled hash and build a new one. */
                ulfsdirhash_free(ip);
        }

        /* Don't hash removed directories. */
        if (ip->i_nlink == 0)
                return (-1);

        vp = ip->i_vnode;
        /* Allocate 50% more entries than this dir size could ever need. */
        KASSERT(ip->i_size >= dirblksiz);
        nslots = ip->i_size / LFS_DIRECTSIZ(fs, 1);
        nslots = (nslots * 3 + 1) / 2;
        narrays = howmany(nslots, DH_NBLKOFF);
        nslots = narrays * DH_NBLKOFF;
        dirblocks = howmany(ip->i_size, dirblksiz);
        nblocks = (dirblocks * 3 + 1) / 2;

        memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
            narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
            nblocks * sizeof(*dh->dh_blkfree);

        while (atomic_add_int_nv(&ulfs_dirhashmem, memreqd) >
            ulfs_dirhashmaxmem) {
                atomic_add_int(&ulfs_dirhashmem, -memreqd);
                if (memreqd > ulfs_dirhashmaxmem / 2)
                        return (-1);
                /* Try to free some space. */
                if (ulfsdirhash_recycle(memreqd) != 0)
                        return (-1);
                else
                        DIRHASHLIST_UNLOCK();
        }

        /*
         * Use non-blocking mallocs so that we will revert to a linear
         * lookup on failure rather than potentially blocking forever.
         */
        dh = pool_cache_get(ulfsdirhash_cache, PR_NOWAIT);
        if (dh == NULL) {
                atomic_add_int(&ulfs_dirhashmem, -memreqd);
                return (-1);
        }
        memset(dh, 0, sizeof(*dh));
        mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
        DIRHASH_LOCK(dh);
        dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
        dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
        dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
        dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
        if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
                goto fail;
        for (i = 0; i < narrays; i++) {
                if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
                        goto fail;
                for (j = 0; j < DH_NBLKOFF; j++)
                        dh->dh_hash[i][j] = DIRHASH_EMPTY;
        }

        /* Initialise the hash table and block statistics. */
        dh->dh_narrays = narrays;
        dh->dh_hlen = nslots;
        dh->dh_nblk = nblocks;
        dh->dh_dirblks = dirblocks;
        for (i = 0; i < dirblocks; i++)
                dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
        for (i = 0; i < DH_NFSTATS; i++)
                dh->dh_firstfree[i] = -1;
        dh->dh_firstfree[DH_NFSTATS] = 0;
        dh->dh_seqopt = 0;
        dh->dh_seqoff = 0;
        dh->dh_score = DH_SCOREINIT;
        ip->i_dirhash = dh;

        bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
        pos = 0;
        while (pos < ip->i_size) {
                preempt_point();

                /* If necessary, get the next directory block. */
                if ((pos & bmask) == 0) {
                        if (bp != NULL)
                                brelse(bp, 0);
                        if (ulfs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
                                goto fail;
                }

                /* Add this entry to the hash. */
                ep = (LFS_DIRHEADER *)((char *)bp->b_data + (pos & bmask));
                if (lfs_dir_getreclen(fs, ep) == 0 || lfs_dir_getreclen(fs, ep) >
                    dirblksiz - (pos & (dirblksiz - 1))) {
                        /* Corrupted directory. */
                        brelse(bp, 0);
                        goto fail;
                }
                if (lfs_dir_getino(fs, ep) != 0) {
                        /* Add the entry (simplified ulfsdirhash_add). */
                        slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, ep),
                            lfs_dir_getnamlen(fs, ep));
                        while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
                                slot = WRAPINCR(slot, dh->dh_hlen);
                        dh->dh_hused++;
                        DH_ENTRY(dh, slot) = pos;
                        ulfsdirhash_adjfree(dh, pos, -LFS_DIRSIZ(fs, ep),
                            dirblksiz);
                }
                pos += lfs_dir_getreclen(fs, ep);
        }

        if (bp != NULL)
                brelse(bp, 0);
        DIRHASHLIST_LOCK();
        TAILQ_INSERT_TAIL(&ulfsdirhash_list, dh, dh_list);
        dh->dh_onlist = 1;
        DIRHASH_UNLOCK(dh);
        DIRHASHLIST_UNLOCK();
        return (0);

fail:
        ip->i_dirhash = NULL;
        DIRHASH_UNLOCK(dh);
        if (dh->dh_hash != NULL) {
                for (i = 0; i < narrays; i++)
                        if (dh->dh_hash[i] != NULL)
                                DIRHASH_BLKFREE(dh->dh_hash[i]);
                kmem_free(dh->dh_hash, dh->dh_hashsz);
        }
        if (dh->dh_blkfree != NULL)
                kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
        mutex_destroy(&dh->dh_lock);
        pool_cache_put(ulfsdirhash_cache, dh);
        atomic_add_int(&ulfs_dirhashmem, -memreqd);
        return (-1);
}

/*
 * Free any hash table associated with inode 'ip'.
 */
void
ulfsdirhash_free(struct inode *ip)
{
        struct dirhash *dh;
        int i, mem;

        if ((dh = ip->i_dirhash) == NULL)
                return;

        ip->i_dirhash = NULL;

        if (dh->dh_onlist) {
                DIRHASHLIST_LOCK();
                if (dh->dh_onlist)
                        TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
                DIRHASHLIST_UNLOCK();
        }

        /* The dirhash pointed to by 'dh' is exclusively ours now. */
        mem = sizeof(*dh);
        if (dh->dh_hash != NULL) {
                for (i = 0; i < dh->dh_narrays; i++)
                        DIRHASH_BLKFREE(dh->dh_hash[i]);
                kmem_free(dh->dh_hash, dh->dh_hashsz);
                kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
                mem += dh->dh_hashsz;
                mem += dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash);
                mem += dh->dh_nblk * sizeof(*dh->dh_blkfree);
        }
        mutex_destroy(&dh->dh_lock);
        pool_cache_put(ulfsdirhash_cache, dh);

        atomic_add_int(&ulfs_dirhashmem, -mem);
}

/*
 * Find the offset of the specified name within the given inode.
 * Returns 0 on success, ENOENT if the entry does not exist, or
 * EJUSTRETURN if the caller should revert to a linear search.
 *
 * If successful, the directory offset is stored in *offp, and a
 * pointer to a struct buf containing the entry is stored in *bpp. If
 * prevoffp is non-NULL, the offset of the previous entry within
 * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry
 * is the first in a block, the start of the block is used).
 */
int
ulfsdirhash_lookup(struct inode *ip, const char *name, int namelen, doff_t *offp,
    struct buf **bpp, doff_t *prevoffp)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh, *dh_next;
        LFS_DIRHEADER *dp;
        struct vnode *vp;
        struct buf *bp;
        doff_t blkoff, bmask, offset, prevoff;
        int i, slot;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return (EJUSTRETURN);

        /*
         * Move this dirhash towards the end of the list if it has a
         * score higher than the next entry, and acquire the dh_lock.
         * Optimise the case where it's already the last by performing
         * an unlocked read of the TAILQ_NEXT pointer.
         *
         * In both cases, end up holding just dh_lock.
         */
        if (TAILQ_NEXT(dh, dh_list) != NULL) {
                DIRHASHLIST_LOCK();
                DIRHASH_LOCK(dh);
                /*
                 * If the new score will be greater than that of the next
                 * entry, then move this entry past it. With both mutexes
                 * held, dh_next won't go away, but its dh_score could
                 * change; that's not important since it is just a hint.
                 */
                if (dh->dh_hash != NULL &&
                    (dh_next = TAILQ_NEXT(dh, dh_list)) != NULL &&
                    dh->dh_score >= dh_next->dh_score) {
                        KASSERT(dh->dh_onlist);
                        TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
                        TAILQ_INSERT_AFTER(&ulfsdirhash_list, dh_next, dh,
                            dh_list);
                }
                DIRHASHLIST_UNLOCK();
        } else {
                /* Already the last, though that could change as we wait. */
                DIRHASH_LOCK(dh);
        }
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return (EJUSTRETURN);
        }

        /* Update the score. */
        if (dh->dh_score < DH_SCOREMAX)
                dh->dh_score++;

        vp = ip->i_vnode;
        bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
        blkoff = -1;
        bp = NULL;
restart:
        slot = ulfsdirhash_hash(dh, name, namelen);

        if (dh->dh_seqopt) {
                /*
                 * Sequential access optimisation. dh_seqoff contains the
                 * offset of the directory entry immediately following
                 * the last entry that was looked up. Check if this offset
                 * appears in the hash chain for the name we are looking for.
                 */
                for (i = slot; (offset = DH_ENTRY(dh, i)) != DIRHASH_EMPTY;
                    i = WRAPINCR(i, dh->dh_hlen))
                        if (offset == dh->dh_seqoff)
                                break;
                if (offset == dh->dh_seqoff) {
                        /*
                         * We found an entry with the expected offset. This
                         * is probably the entry we want, but if not, the
                         * code below will turn off seqoff and retry.
                         */
                        slot = i;
                } else
                        dh->dh_seqopt = 0;
        }

        for (; (offset = DH_ENTRY(dh, slot)) != DIRHASH_EMPTY;
            slot = WRAPINCR(slot, dh->dh_hlen)) {
                if (offset == DIRHASH_DEL)
                        continue;

                if (offset < 0 || offset >= ip->i_size)
                        panic("ulfsdirhash_lookup: bad offset in hash array");
                if ((offset & ~bmask) != blkoff) {
                        if (bp != NULL)
                                brelse(bp, 0);
                        blkoff = offset & ~bmask;
                        if (ulfs_blkatoff(vp, (off_t)blkoff,
                            NULL, &bp, false) != 0) {
                                DIRHASH_UNLOCK(dh);
                                return (EJUSTRETURN);
                        }
                }
                dp = (LFS_DIRHEADER *)((char *)bp->b_data + (offset & bmask));
                if (lfs_dir_getreclen(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) >
                    dirblksiz - (offset & (dirblksiz - 1))) {
                        /* Corrupted directory. */
                        DIRHASH_UNLOCK(dh);
                        brelse(bp, 0);
                        return (EJUSTRETURN);
                }
                if (lfs_dir_getnamlen(fs, dp) == namelen &&
                    memcmp(lfs_dir_nameptr(fs, dp), name, namelen) == 0) {
                        /* Found. Get the prev offset if needed. */
                        if (prevoffp != NULL) {
                                if (offset & (dirblksiz - 1)) {
                                        prevoff = ulfsdirhash_getprev(fs, dp,
                                            offset, dirblksiz);
                                        if (prevoff == -1) {
                                                brelse(bp, 0);
                                                return (EJUSTRETURN);
                                        }
                                } else
                                        prevoff = offset;
                                *prevoffp = prevoff;
                        }

                        /* Check for sequential access, and update offset. */
                        if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset)
                                dh->dh_seqopt = 1;
                        dh->dh_seqoff = offset + LFS_DIRSIZ(fs, dp);
                        DIRHASH_UNLOCK(dh);

                        *bpp = bp;
                        *offp = offset;
                        return (0);
                }

                if (dh->dh_hash == NULL) {
                        DIRHASH_UNLOCK(dh);
                        if (bp != NULL)
                                brelse(bp, 0);
                        ulfsdirhash_free(ip);
                        return (EJUSTRETURN);
                }
                /*
                 * When the name doesn't match in the seqopt case, go back
                 * and search normally.
                 */
                if (dh->dh_seqopt) {
                        dh->dh_seqopt = 0;
                        goto restart;
                }
        }
        DIRHASH_UNLOCK(dh);
        if (bp != NULL)
                brelse(bp, 0);
        return (ENOENT);
}
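
/*
 * Caller-side sketch (illustrative only; the real lookup path in
 * ulfs_lookup.c handles more cases, and the variable names here are
 * hypothetical):
 *
 *      switch (ulfsdirhash_lookup(ip, name, namelen, &off, &bp, NULL)) {
 *      case 0:
 *              // found: use `off' and the buffer returned in `bp'
 *              break;
 *      case ENOENT:
 *              // definitively absent; no linear scan needed
 *              break;
 *      case EJUSTRETURN:
 *              // hash missing or stale; fall back to a linear search
 *              break;
 *      }
 */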

/*
 * Find a directory block with room for 'slotneeded' bytes. Returns
 * the offset of the directory entry that begins the free space.
 * This will either be the offset of an existing entry that has free
 * space at the end, or the offset of an entry with d_ino == 0 at
 * the start of a DIRBLKSIZ block.
 *
 * To use the space, the caller may need to compact existing entries in
 * the directory. The total number of bytes in all of the entries involved
 * in the compaction is stored in *slotsize. In other words, all of
 * the entries that must be compacted are exactly contained in the
 * region beginning at the returned offset and spanning *slotsize bytes.
 *
 * Returns -1 if no space was found, indicating that the directory
 * must be extended.
 */
doff_t
ulfsdirhash_findfree(struct inode *ip, int slotneeded, int *slotsize)
{
        struct lfs *fs = ip->i_lfs;
        LFS_DIRHEADER *dp;
        struct dirhash *dh;
        struct buf *bp;
        doff_t pos, slotstart;
        int dirblock, error, freebytes, i;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return (-1);

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return (-1);
        }

        /* Find a directory block with the desired free space. */
        dirblock = -1;
        for (i = howmany(slotneeded, DIRALIGN); i <= DH_NFSTATS; i++)
                if ((dirblock = dh->dh_firstfree[i]) != -1)
                        break;
        if (dirblock == -1) {
                DIRHASH_UNLOCK(dh);
                return (-1);
        }

        KASSERT(dirblock < dh->dh_nblk &&
            dh->dh_blkfree[dirblock] >= howmany(slotneeded, DIRALIGN));
        pos = dirblock * dirblksiz;
        error = ulfs_blkatoff(ip->i_vnode, (off_t)pos, (void *)&dp, &bp, false);
        if (error) {
                DIRHASH_UNLOCK(dh);
                return (-1);
        }
        /* Find the first entry with free space. */
        for (i = 0; i < dirblksiz; ) {
                if (lfs_dir_getreclen(fs, dp) == 0) {
                        DIRHASH_UNLOCK(dh);
                        brelse(bp, 0);
                        return (-1);
                }
                if (lfs_dir_getino(fs, dp) == 0 || lfs_dir_getreclen(fs, dp) > LFS_DIRSIZ(fs, dp))
                        break;
                i += lfs_dir_getreclen(fs, dp);
                dp = LFS_NEXTDIR(fs, dp);
        }
        if (i > dirblksiz) {
                DIRHASH_UNLOCK(dh);
                brelse(bp, 0);
                return (-1);
        }
        slotstart = pos + i;

        /* Find the range of entries needed to get enough space */
        freebytes = 0;
        while (i < dirblksiz && freebytes < slotneeded) {
                freebytes += lfs_dir_getreclen(fs, dp);
                if (lfs_dir_getino(fs, dp) != 0)
                        freebytes -= LFS_DIRSIZ(fs, dp);
                if (lfs_dir_getreclen(fs, dp) == 0) {
                        DIRHASH_UNLOCK(dh);
                        brelse(bp, 0);
                        return (-1);
                }
                i += lfs_dir_getreclen(fs, dp);
                dp = LFS_NEXTDIR(fs, dp);
        }
        if (i > dirblksiz) {
                DIRHASH_UNLOCK(dh);
                brelse(bp, 0);
                return (-1);
        }
        if (freebytes < slotneeded)
                panic("ulfsdirhash_findfree: free mismatch");
        DIRHASH_UNLOCK(dh);
        brelse(bp, 0);
        *slotsize = pos + i - slotstart;
        return (slotstart);
}

/*
 * Return the start of the unused space at the end of a directory, or
 * -1 if there are no trailing unused blocks.
 */
doff_t
ulfsdirhash_enduseful(struct inode *ip)
{
        struct dirhash *dh;
        int i;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return (-1);

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return (-1);
        }

        if (dh->dh_blkfree[dh->dh_dirblks - 1] != dirblksiz / DIRALIGN) {
                DIRHASH_UNLOCK(dh);
                return (-1);
        }

        for (i = dh->dh_dirblks - 1; i >= 0; i--)
                if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
                        break;
        DIRHASH_UNLOCK(dh);
        return ((doff_t)(i + 1) * dirblksiz);
}

/*
 * Insert information into the hash about a new directory entry. dirp
 * points to the entry's LFS_DIRHEADER, and offset specifies
 * the offset of this entry.
 */
void
ulfsdirhash_add(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh;
        int slot;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return;

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        KASSERT(offset < dh->dh_dirblks * dirblksiz);
        /*
         * Normal hash usage is < 66%. If the usage gets too high then
         * remove the hash entirely and let it be rebuilt later.
         */
        if (dh->dh_hused >= (dh->dh_hlen * 3) / 4) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        /* Find a free hash slot (empty or deleted), and add the entry. */
        slot = ulfsdirhash_hash(dh, lfs_dir_nameptr(fs, dirp),
            lfs_dir_getnamlen(fs, dirp));
        while (DH_ENTRY(dh, slot) >= 0)
                slot = WRAPINCR(slot, dh->dh_hlen);
        if (DH_ENTRY(dh, slot) == DIRHASH_EMPTY)
                dh->dh_hused++;
        DH_ENTRY(dh, slot) = offset;

        /* Update the per-block summary info. */
        ulfsdirhash_adjfree(dh, offset, -LFS_DIRSIZ(fs, dirp), dirblksiz);
        DIRHASH_UNLOCK(dh);
}

/*
 * Remove the specified directory entry from the hash. The entry to remove
 * is defined by the name in `dirp', which must exist at the specified
 * `offset' within the directory.
 */
void
ulfsdirhash_remove(struct inode *ip, LFS_DIRHEADER *dirp, doff_t offset)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh;
        int slot;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return;

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        KASSERT(offset < dh->dh_dirblks * dirblksiz);
        /* Find the entry */
        slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
            lfs_dir_getnamlen(fs, dirp), offset);

        /* Remove the hash entry. */
        ulfsdirhash_delslot(dh, slot);

        /* Update the per-block summary info. */
        ulfsdirhash_adjfree(dh, offset, LFS_DIRSIZ(fs, dirp), dirblksiz);
        DIRHASH_UNLOCK(dh);
}

/*
 * Change the offset associated with a directory entry in the hash. Used
 * when compacting directory blocks.
 */
void
ulfsdirhash_move(struct inode *ip, LFS_DIRHEADER *dirp, doff_t oldoff,
    doff_t newoff)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh;
        int slot;

        if ((dh = ip->i_dirhash) == NULL)
                return;
        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        KASSERT(oldoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz &&
            newoff < dh->dh_dirblks * ip->i_lfs->um_dirblksiz);
        /* Find the entry, and update the offset. */
        slot = ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dirp),
            lfs_dir_getnamlen(fs, dirp), oldoff);
        DH_ENTRY(dh, slot) = newoff;
        DIRHASH_UNLOCK(dh);
}

/*
 * Inform dirhash that the directory has grown by one block that
 * begins at offset (i.e. the new length is offset + DIRBLKSIZ).
 */
void
ulfsdirhash_newblk(struct inode *ip, doff_t offset)
{
        struct dirhash *dh;
        int block;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return;
        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        KASSERT(offset == dh->dh_dirblks * dirblksiz);
        block = offset / dirblksiz;
        if (block >= dh->dh_nblk) {
                /* Out of space; must rebuild. */
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }
        dh->dh_dirblks = block + 1;

        /* Account for the new free block. */
        dh->dh_blkfree[block] = dirblksiz / DIRALIGN;
        if (dh->dh_firstfree[DH_NFSTATS] == -1)
                dh->dh_firstfree[DH_NFSTATS] = block;
        DIRHASH_UNLOCK(dh);
}

/*
 * Inform dirhash that the directory is being truncated.
 */
void
ulfsdirhash_dirtrunc(struct inode *ip, doff_t offset)
{
        struct dirhash *dh;
        int block, i;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if ((dh = ip->i_dirhash) == NULL)
                return;

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        KASSERT(offset <= dh->dh_dirblks * dirblksiz);
        block = howmany(offset, dirblksiz);
        /*
         * If the directory shrinks to less than 1/8 of dh_nblk blocks
         * (about 20% of its original size due to the 50% extra added in
         * ulfsdirhash_build) then free it, and let the caller rebuild
         * if necessary.
         */
        if (block < dh->dh_nblk / 8 && dh->dh_narrays > 1) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        /*
         * Remove any `first free' information pertaining to the
         * truncated blocks. All blocks we're removing should be
         * completely unused.
         */
        if (dh->dh_firstfree[DH_NFSTATS] >= block)
                dh->dh_firstfree[DH_NFSTATS] = -1;
        for (i = block; i < dh->dh_dirblks; i++)
                if (dh->dh_blkfree[i] != dirblksiz / DIRALIGN)
                        panic("ulfsdirhash_dirtrunc: blocks in use");
        for (i = 0; i < DH_NFSTATS; i++)
                if (dh->dh_firstfree[i] >= block)
                        panic("ulfsdirhash_dirtrunc: first free corrupt");
        dh->dh_dirblks = block;
        DIRHASH_UNLOCK(dh);
}

/*
 * Debugging function to check that the dirhash information about
 * a directory block matches its actual contents. Panics if a mismatch
 * is detected.
 *
 * On entry, `sbuf' should point to the start of an in-core
 * DIRBLKSIZ-sized directory block, and `offset' should contain the
 * offset from the start of the directory of that block.
 */
void
ulfsdirhash_checkblock(struct inode *ip, char *sbuf, doff_t offset)
{
        struct lfs *fs = ip->i_lfs;
        struct dirhash *dh;
        LFS_DIRHEADER *dp;
        int block, ffslot, i, nfree;
        int dirblksiz = ip->i_lfs->um_dirblksiz;

        if (!ulfs_dirhashcheck)
                return;
        if ((dh = ip->i_dirhash) == NULL)
                return;

        DIRHASH_LOCK(dh);
        if (dh->dh_hash == NULL) {
                DIRHASH_UNLOCK(dh);
                ulfsdirhash_free(ip);
                return;
        }

        block = offset / dirblksiz;
        if ((offset & (dirblksiz - 1)) != 0 || block >= dh->dh_dirblks)
                panic("ulfsdirhash_checkblock: bad offset");

        nfree = 0;
        for (i = 0; i < dirblksiz; i += lfs_dir_getreclen(fs, dp)) {
                dp = (LFS_DIRHEADER *)(sbuf + i);
                if (lfs_dir_getreclen(fs, dp) == 0 || i + lfs_dir_getreclen(fs, dp) > dirblksiz)
                        panic("ulfsdirhash_checkblock: bad dir");

                if (lfs_dir_getino(fs, dp) == 0) {
#if 0
                        /*
                         * XXX entries with d_ino == 0 should only occur
                         * at the start of a DIRBLKSIZ block. However the
                         * ulfs code is tolerant of such entries at other
                         * offsets, and fsck does not fix them.
                         */
                        if (i != 0)
                                panic("ulfsdirhash_checkblock: bad dir inode");
#endif
                        nfree += lfs_dir_getreclen(fs, dp);
                        continue;
                }

                /* Check that the entry exists (will panic if it doesn't). */
                ulfsdirhash_findslot(dh, lfs_dir_nameptr(fs, dp),
                    lfs_dir_getnamlen(fs, dp),
                    offset + i);

                nfree += lfs_dir_getreclen(fs, dp) - LFS_DIRSIZ(fs, dp);
        }
        if (i != dirblksiz)
                panic("ulfsdirhash_checkblock: bad dir end");

        if (dh->dh_blkfree[block] * DIRALIGN != nfree)
                panic("ulfsdirhash_checkblock: bad free count");

        ffslot = BLKFREE2IDX(nfree / DIRALIGN);
        for (i = 0; i <= DH_NFSTATS; i++)
                if (dh->dh_firstfree[i] == block && i != ffslot)
                        panic("ulfsdirhash_checkblock: bad first-free");
        if (dh->dh_firstfree[ffslot] == -1)
                panic("ulfsdirhash_checkblock: missing first-free entry");
        DIRHASH_UNLOCK(dh);
}

/*
 * Hash the specified filename into a dirhash slot.
 */
static int
ulfsdirhash_hash(struct dirhash *dh, const char *name, int namelen)
{
        u_int32_t hash;

        /*
         * We hash the name and then some other bit of data that is
         * invariant over the dirhash's lifetime. Otherwise names
         * differing only in the last byte are placed close to one
         * another in the table, which is bad for linear probing.
         */
        hash = hash32_buf(name, namelen, HASH32_BUF_INIT);
        hash = hash32_buf(&dh, sizeof(dh), hash);
        return (hash % dh->dh_hlen);
}

/*
 * Adjust the number of free bytes in the block containing `offset'
 * by the value specified by `diff'.
 *
 * The caller must ensure we have exclusive access to `dh'; normally
 * that means that dh_lock should be held, but this is also called
 * from ulfsdirhash_build() where exclusive access can be assumed.
 */
static void
ulfsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff, int dirblksiz)
{
        int block, i, nfidx, ofidx;

        KASSERT(mutex_owned(&dh->dh_lock));

        /* Update the per-block summary info. */
        block = offset / dirblksiz;
        KASSERT(block < dh->dh_nblk && block < dh->dh_dirblks);
        ofidx = BLKFREE2IDX(dh->dh_blkfree[block]);
        dh->dh_blkfree[block] = (int)dh->dh_blkfree[block] + (diff / DIRALIGN);
        nfidx = BLKFREE2IDX(dh->dh_blkfree[block]);

        /* Update the `first free' list if necessary. */
        if (ofidx != nfidx) {
                /* If removing, scan forward for the next block. */
                if (dh->dh_firstfree[ofidx] == block) {
                        for (i = block + 1; i < dh->dh_dirblks; i++)
                                if (BLKFREE2IDX(dh->dh_blkfree[i]) == ofidx)
                                        break;
                        dh->dh_firstfree[ofidx] = (i < dh->dh_dirblks) ? i : -1;
                }

                /* Make this the new `first free' if necessary */
                if (dh->dh_firstfree[nfidx] > block ||
                    dh->dh_firstfree[nfidx] == -1)
                        dh->dh_firstfree[nfidx] = block;
        }
}

/*
 * Find the specified name which should have the specified offset.
 * Returns a slot number, and panics on failure.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static int
ulfsdirhash_findslot(struct dirhash *dh, const char *name, int namelen,
    doff_t offset)
{
        int slot;

        KASSERT(mutex_owned(&dh->dh_lock));

        /* Find the entry. */
        KASSERT(dh->dh_hused < dh->dh_hlen);
        slot = ulfsdirhash_hash(dh, name, namelen);
        while (DH_ENTRY(dh, slot) != offset &&
            DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
                slot = WRAPINCR(slot, dh->dh_hlen);
        if (DH_ENTRY(dh, slot) != offset)
                panic("ulfsdirhash_findslot: '%.*s' not found", namelen, name);

        return (slot);
}

/*
 * Remove the entry corresponding to the specified slot from the hash array.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static void
ulfsdirhash_delslot(struct dirhash *dh, int slot)
{
        int i;

        KASSERT(mutex_owned(&dh->dh_lock));

        /* Mark the entry as deleted. */
        DH_ENTRY(dh, slot) = DIRHASH_DEL;

        /* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
        for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
                i = WRAPINCR(i, dh->dh_hlen);
        if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
                i = WRAPDECR(i, dh->dh_hlen);
                while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
                        DH_ENTRY(dh, i) = DIRHASH_EMPTY;
                        dh->dh_hused--;
                        i = WRAPDECR(i, dh->dh_hlen);
                }
                KASSERT(dh->dh_hused >= 0);
        }
}
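
/*
 * Worked example (added commentary): suppose ulfsdirhash_delslot()
 * marks slot 5 as DIRHASH_DEL while slots 6 and 7 already hold
 * DIRHASH_DEL and slot 8 is DIRHASH_EMPTY.  The forward scan stops at
 * slot 8, so the backward loop converts slots 7, 6 and 5 back to
 * DIRHASH_EMPTY and decrements dh_hused for each.  This is safe
 * because probe chains terminate only at DIRHASH_EMPTY: a deleted
 * slot followed by an empty one cannot hide any live entry further
 * along the chain.
 */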

/*
 * Given a directory entry and its offset, find the offset of the
 * previous entry in the same DIRBLKSIZ-sized block. Returns an
 * offset, or -1 if there is no previous entry in the block or some
 * other problem occurred.
 */
static doff_t
ulfsdirhash_getprev(struct lfs *fs, LFS_DIRHEADER *dirp,
    doff_t offset, int dirblksiz)
{
        LFS_DIRHEADER *dp;
        char *blkbuf;
        doff_t blkoff, prevoff;
        int entrypos, i;
        unsigned reclen;

        blkoff = offset & ~(dirblksiz - 1);     /* offset of start of block */
        entrypos = offset & (dirblksiz - 1);    /* entry relative to block */
        blkbuf = (char *)dirp - entrypos;
        prevoff = blkoff;

        /* If `offset' is the start of a block, there is no previous entry. */
        if (entrypos == 0)
                return (-1);

        /* Scan from the start of the block until we get to the entry. */
        for (i = 0; i < entrypos; i += reclen) {
                dp = (LFS_DIRHEADER *)(blkbuf + i);
                reclen = lfs_dir_getreclen(fs, dp);
                if (reclen == 0 || i + reclen > entrypos)
                        return (-1);    /* Corrupted directory. */
                prevoff = blkoff + i;
        }
        return (prevoff);
}

/*
 * Try to free up `wanted' bytes by stealing memory from existing
 * dirhashes. Returns zero with list locked if successful.
 */
static int
ulfsdirhash_recycle(int wanted)
{
        struct dirhash *dh;
        doff_t **hash;
        u_int8_t *blkfree;
        int i, mem, narrays;
        size_t hashsz, blkfreesz;

        DIRHASHLIST_LOCK();
        while (wanted + ulfs_dirhashmem > ulfs_dirhashmaxmem) {
                /* Find a dirhash, and lock it. */
                if ((dh = TAILQ_FIRST(&ulfsdirhash_list)) == NULL) {
                        DIRHASHLIST_UNLOCK();
                        return (-1);
                }
                DIRHASH_LOCK(dh);
                KASSERT(dh->dh_hash != NULL);

                /* Decrement the score; only recycle if it becomes zero. */
                if (--dh->dh_score > 0) {
                        DIRHASH_UNLOCK(dh);
                        DIRHASHLIST_UNLOCK();
                        return (-1);
                }

                /* Remove it from the list and detach its memory. */
                TAILQ_REMOVE(&ulfsdirhash_list, dh, dh_list);
                dh->dh_onlist = 0;
                hash = dh->dh_hash;
                hashsz = dh->dh_hashsz;
                dh->dh_hash = NULL;
                blkfree = dh->dh_blkfree;
                blkfreesz = dh->dh_blkfreesz;
                dh->dh_blkfree = NULL;
                narrays = dh->dh_narrays;
                mem = narrays * sizeof(*dh->dh_hash) +
                    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
                    dh->dh_nblk * sizeof(*dh->dh_blkfree);

                /* Unlock everything, free the detached memory. */
                DIRHASH_UNLOCK(dh);
                DIRHASHLIST_UNLOCK();

                for (i = 0; i < narrays; i++)
                        DIRHASH_BLKFREE(hash[i]);
                kmem_free(hash, hashsz);
                kmem_free(blkfree, blkfreesz);

                /* Account for the returned memory, and repeat if necessary. */
                DIRHASHLIST_LOCK();
                atomic_add_int(&ulfs_dirhashmem, -mem);
        }
        /* Success. */
        return (0);
}

static void
ulfsdirhash_sysctl_init(void)
{
        const struct sysctlnode *rnode, *cnode;

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, NULL, &rnode,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "ulfs",
                       SYSCTL_DESCR("ulfs"),
                       NULL, 0, NULL, 0,
                       CTL_VFS, CTL_CREATE, CTL_EOL);

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &rnode,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "dirhash",
                       SYSCTL_DESCR("dirhash"),
                       NULL, 0, NULL, 0,
                       CTL_CREATE, CTL_EOL);

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "minblocks",
                       SYSCTL_DESCR("minimum hashed directory size in blocks"),
                       NULL, 0, &ulfs_dirhashminblks, 0,
                       CTL_CREATE, CTL_EOL);

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "maxmem",
                       SYSCTL_DESCR("maximum dirhash memory usage"),
                       NULL, 0, &ulfs_dirhashmaxmem, 0,
                       CTL_CREATE, CTL_EOL);

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
                       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
                       CTLTYPE_INT, "memused",
                       SYSCTL_DESCR("current dirhash memory usage"),
                       NULL, 0, &ulfs_dirhashmem, 0,
                       CTL_CREATE, CTL_EOL);

        sysctl_createv(&ulfsdirhash_sysctl_log, 0, &rnode, &cnode,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "docheck",
                       SYSCTL_DESCR("enable extra sanity checks"),
                       NULL, 0, &ulfs_dirhashcheck, 0,
                       CTL_CREATE, CTL_EOL);
}

void
ulfsdirhash_init(void)
{

        mutex_init(&ulfsdirhash_lock, MUTEX_DEFAULT, IPL_NONE);
        ulfsdirhashblk_cache = pool_cache_init(DH_NBLKOFF * sizeof(daddr_t), 0,
            0, 0, "dirhashblk", NULL, IPL_NONE, NULL, NULL, NULL);
        ulfsdirhash_cache = pool_cache_init(sizeof(struct dirhash), 0,
            0, 0, "dirhash", NULL, IPL_NONE, NULL, NULL, NULL);
        TAILQ_INIT(&ulfsdirhash_list);
        ulfsdirhash_sysctl_init();
}

void
ulfsdirhash_done(void)
{

        KASSERT(TAILQ_EMPTY(&ulfsdirhash_list));
        pool_cache_destroy(ulfsdirhashblk_cache);
        pool_cache_destroy(ulfsdirhash_cache);
        mutex_destroy(&ulfsdirhash_lock);
        sysctl_teardown(&ulfsdirhash_sysctl_log);
}