/* $NetBSD: vfs_dirhash.c,v 1.8 2008/10/31 16:04:59 reinoud Exp $ */

/*
 * Copyright (c) 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_dirhash.c,v 1.8 2008/10/31 16:04:59 reinoud Exp $");

/* CLEAN UP! */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/dirent.h>
#include <sys/hash.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/dirhash.h>

#if 1
#	define DPRINTF(a) ;
#else
#	define DPRINTF(a) printf a;
#endif

/*
 * The locking protocol of the dirhash structures is fairly simple:
 *
 * The global dirhash_queue is protected by the dirhashmutex. This lock is
 * internal only and is FS/mountpoint/vnode independent. On exit of the
 * exported functions this mutex is not held.
 *
 * The dirhash structure is considered part of the vnode/inode/udf_node
 * structure and will thus use the lock that protects that vnode/inode.
 *
 * The dirhash entries are considered part of the dirhash structure and are
 * thus protected by that same lock.
 */
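
/*
 * A minimal usage sketch (not compiled) of how a file system is expected to
 * drive this interface under the locking rules above.  struct example_node,
 * its dir_hash member and example_verify_on_disk() are made up for
 * illustration; only the dirhash_*() calls are the real interface.
 */
#if 0
static int
example_dir_lookup(struct example_node *dir_node, const char *name, int namlen)
{
	struct dirhash_entry *result;
	int found;

	/* the caller already holds the vnode/inode lock covering dir_node */
	dirhash_get(&dir_node->dir_hash);	/* allocate or LRU-refresh */

	found = 0;
	result = NULL;
	while (dirhash_lookup(dir_node->dir_hash, name, namlen, &result)) {
		/* hash hit only; verify the entry at result->offset on disk */
		if (example_verify_on_disk(dir_node, result->offset, name, namlen)) {
			found = 1;
			break;
		}
	}

	/* result must not be used once the node lock is dropped */
	dirhash_put(dir_node->dir_hash);	/* drop our reference */
	return found;
}
#endif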

static struct sysctllog *sysctl_log;
static struct pool dirhash_pool;
static struct pool dirhash_entry_pool;

static kmutex_t dirhashmutex;
static uint32_t maxdirhashsize = DIRHASH_SIZE;
static uint32_t dirhashsize = 0;
static TAILQ_HEAD(_dirhash, dirhash) dirhash_queue;


void
dirhash_init(void)
{
	const struct sysctlnode *rnode, *cnode;
	size_t sz;
	uint32_t max_entries;

	/* initialise dirhash queue */
	TAILQ_INIT(&dirhash_queue);

	/* init dirhash pools */
	sz = sizeof(struct dirhash);
	pool_init(&dirhash_pool, sz, 0, 0, 0,
		"dirhpl", NULL, IPL_NONE);

	sz = sizeof(struct dirhash_entry);
	pool_init(&dirhash_entry_pool, sz, 0, 0, 0,
		"dirhepl", NULL, IPL_NONE);

	mutex_init(&dirhashmutex, MUTEX_DEFAULT, IPL_NONE);
	max_entries = maxdirhashsize / sz;
	pool_sethiwat(&dirhash_entry_pool, max_entries);
	dirhashsize = 0;

	/* create sysctl knobs and dials */
	sysctl_log = NULL;
	sysctl_createv(&sysctl_log, 0, NULL, &rnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "dirhash", NULL,
		NULL, 0, NULL, 0,
		CTL_VFS, VFS_GENERIC, CTL_CREATE, CTL_EOL);
	sysctl_createv(&sysctl_log, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "memused",
		SYSCTL_DESCR("current dirhash memory usage"),
		NULL, 0, &dirhashsize, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(&sysctl_log, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxmem",
		SYSCTL_DESCR("maximum dirhash memory usage"),
		NULL, 0, &maxdirhashsize, 0,
		CTL_CREATE, CTL_EOL);
}


#if 0
void
dirhash_finish(void)
{
	pool_destroy(&dirhash_pool);
	pool_destroy(&dirhash_entry_pool);

	mutex_destroy(&dirhashmutex);

	/* sysctl_teardown(&sysctl_log); */
}
#endif


/*
 * generic dirhash implementation
 */

void
dirhash_purge_entries(struct dirhash *dirh)
{
	struct dirhash_entry *dirh_e;
	uint32_t hashline;

	if (dirh == NULL)
		return;

	if (dirh->size == 0)
		return;

	for (hashline = 0; hashline < DIRHASH_HASHSIZE; hashline++) {
		dirh_e = LIST_FIRST(&dirh->entries[hashline]);
		while (dirh_e) {
			LIST_REMOVE(dirh_e, next);
			pool_put(&dirhash_entry_pool, dirh_e);
			dirh_e = LIST_FIRST(&dirh->entries[hashline]);
		}
	}
	dirh_e = LIST_FIRST(&dirh->free_entries);
	while (dirh_e) {
		LIST_REMOVE(dirh_e, next);
		pool_put(&dirhash_entry_pool, dirh_e);
		dirh_e = LIST_FIRST(&dirh->free_entries);
	}

	dirh->flags &= ~DIRH_COMPLETE;
	dirh->flags |= DIRH_PURGED;

	dirhashsize -= dirh->size;
	dirh->size = 0;
}


void
dirhash_purge(struct dirhash **dirhp)
{
	struct dirhash *dirh = *dirhp;

	if (dirh == NULL)
		return;

	/* purge its entries */
	dirhash_purge_entries(dirh);

	/* recycle */
	mutex_enter(&dirhashmutex);
	TAILQ_REMOVE(&dirhash_queue, dirh, next);
	mutex_exit(&dirhashmutex);

	pool_put(&dirhash_pool, dirh);
	*dirhp = NULL;
}


void
dirhash_get(struct dirhash **dirhp)
{
	struct dirhash *dirh;
	uint32_t hashline;

	/* if no dirhash was given, allocate one */
	dirh = *dirhp;
	if (dirh == NULL) {
		dirh = pool_get(&dirhash_pool, PR_WAITOK);
		memset(dirh, 0, sizeof(struct dirhash));
		for (hashline = 0; hashline < DIRHASH_HASHSIZE; hashline++) {
			LIST_INIT(&dirh->entries[hashline]);
		}
	}

	/* implement LRU on the dirhash queue */
	mutex_enter(&dirhashmutex);
	if (*dirhp) {
		/* remove from queue to be requeued */
		TAILQ_REMOVE(&dirhash_queue, dirh, next);
	}
	dirh->refcnt++;
	TAILQ_INSERT_HEAD(&dirhash_queue, dirh, next);
	mutex_exit(&dirhashmutex);

	*dirhp = dirh;
}


void
dirhash_put(struct dirhash *dirh)
{

	mutex_enter(&dirhashmutex);
	dirh->refcnt--;
	mutex_exit(&dirhashmutex);
}


void
dirhash_enter(struct dirhash *dirh,
	struct dirent *dirent, uint64_t offset, uint32_t entry_size, int new)
{
	struct dirhash *del_dirh, *prev_dirh;
	struct dirhash_entry *dirh_e;
	uint32_t hashvalue, hashline;
	int entrysize;

	/* make sure we have a dirhash to work on */
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);

	/* are we trying to re-enter an entry? */
	if (!new && (dirh->flags & DIRH_COMPLETE))
		return;

	/* calculate our hash */
	hashvalue = hash32_strn(dirent->d_name, dirent->d_namlen, HASH32_STR_INIT);
	hashline = hashvalue & DIRHASH_HASHMASK;

	/* lookup and insert entry if not there yet */
	LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
		/* check for hash collision */
		if (dirh_e->hashvalue != hashvalue)
			continue;
		if (dirh_e->offset != offset)
			continue;
		/* got it already */
		KASSERT(dirh_e->d_namlen == dirent->d_namlen);
		KASSERT(dirh_e->entry_size == entry_size);
		return;
	}

	DPRINTF(("dirhash enter %"PRIu64", %d, %d for `%*.*s`\n",
		offset, entry_size, dirent->d_namlen,
		dirent->d_namlen, dirent->d_namlen, dirent->d_name));

	/* check if entry is in free space list */
	LIST_FOREACH(dirh_e, &dirh->free_entries, next) {
		if (dirh_e->offset == offset) {
			DPRINTF(("\tremoving free entry\n"));
			LIST_REMOVE(dirh_e, next);
			break;
		}
	}

	/* ensure we are not passing the dirhash limit */
	entrysize = sizeof(struct dirhash_entry);
	if (dirhashsize + entrysize > maxdirhashsize) {
		/* we walk the dirhash_queue, so need to lock it */
		mutex_enter(&dirhashmutex);
		del_dirh = TAILQ_LAST(&dirhash_queue, _dirhash);
		KASSERT(del_dirh);
		while (dirhashsize + entrysize > maxdirhashsize) {
			/* no use trying to delete myself */
			if (del_dirh == dirh)
				break;
			prev_dirh = TAILQ_PREV(del_dirh, _dirhash, next);
			if (del_dirh->refcnt == 0)
				dirhash_purge_entries(del_dirh);
			del_dirh = prev_dirh;
		}
		mutex_exit(&dirhashmutex);
	}

	/* add to the hashline */
	dirh_e = pool_get(&dirhash_entry_pool, PR_WAITOK);
	memset(dirh_e, 0, sizeof(struct dirhash_entry));

	dirh_e->hashvalue = hashvalue;
	dirh_e->offset = offset;
	dirh_e->d_namlen = dirent->d_namlen;
	dirh_e->entry_size = entry_size;

	dirh->size += sizeof(struct dirhash_entry);
	dirhashsize += sizeof(struct dirhash_entry);
	LIST_INSERT_HEAD(&dirh->entries[hashline], dirh_e, next);
}


void
dirhash_enter_freed(struct dirhash *dirh, uint64_t offset,
	uint32_t entry_size)
{
	struct dirhash_entry *dirh_e;

	/* make sure we have a dirhash to work on */
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);

	/* check for double entry of free space */
	LIST_FOREACH(dirh_e, &dirh->free_entries, next) {
		KASSERT(dirh_e->offset != offset);
	}

	DPRINTF(("dirhash enter FREED %"PRIu64", %d\n",
		offset, entry_size));
	dirh_e = pool_get(&dirhash_entry_pool, PR_WAITOK);
	memset(dirh_e, 0, sizeof(struct dirhash_entry));

	dirh_e->hashvalue = 0;		/* not relevant */
	dirh_e->offset = offset;
	dirh_e->d_namlen = 0;		/* not relevant */
	dirh_e->entry_size = entry_size;

	/* XXX it might be preferable to append them at the tail */
	LIST_INSERT_HEAD(&dirh->free_entries, dirh_e, next);
	dirh->size += sizeof(struct dirhash_entry);
	dirhashsize += sizeof(struct dirhash_entry);
}


void
dirhash_remove(struct dirhash *dirh, struct dirent *dirent,
	uint64_t offset, uint32_t entry_size)
{
	struct dirhash_entry *dirh_e;
	uint32_t hashvalue, hashline;

	DPRINTF(("dirhash remove %"PRIu64", %d for `%*.*s`\n",
		offset, entry_size,
		dirent->d_namlen, dirent->d_namlen, dirent->d_name));

	/* make sure we have a dirhash to work on */
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);

	/* calculate our hash */
	hashvalue = hash32_strn(dirent->d_name, dirent->d_namlen, HASH32_STR_INIT);
	hashline = hashvalue & DIRHASH_HASHMASK;

	/* lookup entry */
	LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
		/* check for hash collision */
		if (dirh_e->hashvalue != hashvalue)
			continue;
		if (dirh_e->offset != offset)
			continue;

		/* got it! */
		KASSERT(dirh_e->d_namlen == dirent->d_namlen);
		KASSERT(dirh_e->entry_size == entry_size);
		LIST_REMOVE(dirh_e, next);
		dirh->size -= sizeof(struct dirhash_entry);
		dirhashsize -= sizeof(struct dirhash_entry);

		dirhash_enter_freed(dirh, offset, entry_size);
		return;
	}

	/* not found! */
	panic("dirhash_remove couldn't find entry in hash table\n");
}


/*
 * BUGALERT: don't use the result any longer than needed and never past the
 * point where the node lock is dropped.  Call with *result == NULL
 * initially; a nonzero return delivers a candidate entry, and calling again
 * resumes the search after that candidate.
 */
int
dirhash_lookup(struct dirhash *dirh, const char *d_name, int d_namlen,
	struct dirhash_entry **result)
{
	struct dirhash_entry *dirh_e;
	uint32_t hashvalue, hashline;

	/* make sure we have a dirhash to work on */
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);

	/* start where we were */
	if (*result) {
		dirh_e = *result;

		/* retrieve information to avoid recalculation and advance */
		hashvalue = dirh_e->hashvalue;
		dirh_e = LIST_NEXT(*result, next);
	} else {
		/* calculate our hash and lookup all entries in hashline */
		hashvalue = hash32_strn(d_name, d_namlen, HASH32_STR_INIT);
		hashline = hashvalue & DIRHASH_HASHMASK;
		dirh_e = LIST_FIRST(&dirh->entries[hashline]);
	}

	for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
		/* check for hash collision */
		if (dirh_e->hashvalue != hashvalue)
			continue;
		if (dirh_e->d_namlen != d_namlen)
			continue;
		/* might have an entry in the cache */
		*result = dirh_e;
		return 1;
	}

	*result = NULL;
	return 0;
}
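
/*
 * A hedged usage sketch for dirhash_lookup() (not compiled).  Only the 32 bit
 * hash value and the name length are compared above, so every hit is merely a
 * candidate that has to be verified against the on-disk directory; on a
 * mismatch the same call resumes the scan after the previous candidate.
 * The variables dirh, vp, name and namlen are assumed to come from the
 * calling context and example_same_name_on_disk() is a made-up verifier.
 */
#if 0
	struct dirhash_entry *dirh_e = NULL;
	uint64_t found_offset = 0;

	while (dirhash_lookup(dirh, name, namlen, &dirh_e)) {
		if (example_same_name_on_disk(vp, dirh_e->offset, name, namlen)) {
			found_offset = dirh_e->offset;
			break;
		}
		/* hash collision: loop to resume after this candidate */
	}
	/* per the BUGALERT above: stop using dirh_e before the node lock goes */
#endif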


/*
 * BUGALERT: don't use the result any longer than needed and never past the
 * point where the node lock is dropped.  Call with *result == NULL
 * initially; a nonzero return delivers a candidate entry, and calling again
 * resumes the search after that candidate.
 */
int
dirhash_lookup_freed(struct dirhash *dirh, uint32_t min_entrysize,
	struct dirhash_entry **result)
{
	struct dirhash_entry *dirh_e;

	/* make sure we have a dirhash to work on */
	KASSERT(dirh);
	KASSERT(dirh->refcnt > 0);

	/* start where we were */
	if (*result) {
		dirh_e = LIST_NEXT(*result, next);
	} else {
		/* lookup all entries that match */
		dirh_e = LIST_FIRST(&dirh->free_entries);
	}

	for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
		/* check for minimum size */
		if (dirh_e->entry_size < min_entrysize)
			continue;
		/* might be a candidate */
		*result = dirh_e;
		return 1;
	}

	*result = NULL;
	return 0;
}
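
/*
 * A rough sketch (not compiled) of hunting for recorded free space big enough
 * to hold a new directory entry.  The variables dirh, vp and wanted_size are
 * assumed to come from the calling context and example_slot_still_usable()
 * is a made-up verifier; a caller that rejects a candidate simply calls
 * again to continue at the next recorded free entry.
 */
#if 0
	struct dirhash_entry *free_e = NULL;

	while (dirhash_lookup_freed(dirh, wanted_size, &free_e)) {
		/* free_e->offset and free_e->entry_size describe the slot */
		if (example_slot_still_usable(vp, free_e->offset, wanted_size))
			break;		/* reuse this slot */
	}
#endif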