/*	$NetBSD: subr_thmap.c,v 1.6 2020/05/23 19:52:12 rmind Exp $	*/

/*-
 * Copyright (c) 2018 Mindaugas Rasiukevicius <rmind at noxt eu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Upstream: https://github.com/rmind/thmap/
 */

/*
 * Concurrent trie-hash map.
 *
 * The data structure is conceptually a radix trie on hashed keys.
 * Keys are hashed using a 32-bit function.  The root level is a special
 * case: it is managed using the compare-and-swap (CAS) atomic operation
 * and has a fanout of 64.  The subsequent levels are constructed using
 * intermediate nodes with a fanout of 16 (using 4 bits).  As more levels
 * are created, more blocks of the 32-bit hash value might be generated
 * by incrementing the seed parameter of the hash function.
 *
 * Concurrency
 *
 * - READERS: Descending is simply walking through the slot values of
 *   the intermediate nodes.  It is lock-free as there is no intermediate
 *   state: the slot is either empty or has a pointer to the child node.
 *   The main assumptions here are the following:
 *
 *   i) modifications must preserve consistency with respect to the
 *   readers, i.e. the readers can only see valid node values;
 *
 *   ii) any invalid view must "fail" the reads, e.g. by making them
 *   re-try from the root; this is the case for deletions and is achieved
 *   using the NODE_DELETED flag.
 *
 *   iii) the node destruction must be synchronized with the readers,
 *   e.g. by using Epoch-based reclamation or other techniques.
 *
 * - WRITERS AND LOCKING: Each intermediate node has a spin-lock (which
 *   is implemented using the NODE_LOCKED bit) -- it provides mutual
 *   exclusion amongst concurrent writers.  The lock order for the nodes
 *   is "bottom-up", i.e. they are locked as we ascend the trie.  A key
 *   constraint here is that the parent pointer never changes.
 *
 * - DELETES: In addition to the writers' locking, the deletion keeps the
 *   intermediate nodes in a valid state and sets the NODE_DELETED flag,
 *   to indicate that the readers must re-start the walk from the root.
 *   As the levels are collapsed, NODE_DELETED gets propagated up-tree.
 *   The leaf nodes just stay as-is until they are reclaimed.
 *
 * - ROOT LEVEL: The root level is a special case, as it is implemented
 *   as an array (rather than an intermediate node).  The root-level slot
 *   can only be set using CAS and it can only be set to a valid intermediate
 *   node.  The root-level slot can only be cleared when the node it points
 *   at becomes empty, is locked and marked as NODE_DELETED (this causes
 *   the insert/delete operations to re-try until the slot is set to NULL).
 *
 * References:
 *
 *	W. Litwin, 1981, Trie Hashing.
 *	Proceedings of the 1981 ACM SIGMOD, p. 19-29
 *	https://dl.acm.org/citation.cfm?id=582322
 *
 *	P. L. Lehman and S. B. Yao.
 *	Efficient locking for concurrent operations on B-trees.
 *	ACM TODS, 6(4):650-670, 1981
 *	https://www.csd.uoc.gr/~hy460/pdf/p650-lehman.pdf
 */
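
/*
 * Illustrative usage sketch (not part of this file; assumes in-process
 * pointers, i.e. a zero base address and the default allocation ops):
 *
 *	thmap_t *map = thmap_create(0, NULL, 0);
 *
 *	thmap_put(map, key, keylen, val);	// insert (copies the key)
 *	val = thmap_get(map, key, keylen);	// lock-free lookup
 *	val = thmap_del(map, key, keylen);	// remove; memory is staged
 *
 *	ref = thmap_stage_gc(map);	// fetch the staged-for-G/C list
 *	// ... wait out a grace period (e.g. EBR) so readers drain ...
 *	thmap_gc(map, ref);		// reclaim the staged memory
 *	thmap_destroy(map);
 */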

#ifdef _KERNEL
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/thmap.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/atomic.h>
#include <sys/hash.h>
#define THMAP_RCSID(a) __KERNEL_RCSID(0, a)
#else
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stddef.h>
#include <inttypes.h>
#include <string.h>
#include <limits.h>
#define THMAP_RCSID(a) __RCSID(a)

#include "thmap.h"
#include "utils.h"
#endif

THMAP_RCSID("$NetBSD: subr_thmap.c,v 1.6 2020/05/23 19:52:12 rmind Exp $");

/*
 * NetBSD kernel wrappers
 */
#ifdef _KERNEL
#define	ASSERT KASSERT
#define	atomic_thread_fence(x) membar_sync()
#define	atomic_compare_exchange_weak_explicit_32(p, e, n, m1, m2) \
    (atomic_cas_32((p), *(e), (n)) == *(e))
#define	atomic_compare_exchange_weak_explicit_ptr(p, e, n, m1, m2) \
    (atomic_cas_ptr((p), *(void **)(e), (void *)(n)) == *(void **)(e))
#define	atomic_exchange_explicit(o, n, m1) atomic_swap_ptr((o), (n))
#define	murmurhash3 murmurhash2
#endif

/*
 * The root level fanout is 64 (indexed by the upper 6 bits of the hash
 * value XORed with the key length).  Each subsequent level, represented
 * by intermediate nodes, has a fanout of 16 (using 4 bits).
 *
 * The hash function produces 32-bit values.  A worked example of the
 * hash slicing follows the definitions below.
 */

#define	HASHVAL_BITS	(32)
#define	HASHVAL_MOD	(HASHVAL_BITS - 1)
#define	HASHVAL_SHIFT	(5)

#define	ROOT_BITS	(6)
#define	ROOT_SIZE	(1 << ROOT_BITS)
#define	ROOT_MASK	(ROOT_SIZE - 1)
#define	ROOT_MSBITS	(HASHVAL_BITS - ROOT_BITS)

#define	LEVEL_BITS	(4)
#define	LEVEL_SIZE	(1 << LEVEL_BITS)
#define	LEVEL_MASK	(LEVEL_SIZE - 1)
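
/*
 * A worked example of the hash slicing (illustration only): the root
 * slot is ((murmurhash3(key, len, 0) >> ROOT_MSBITS) ^ len) & ROOT_MASK,
 * i.e. the upper 6 bits of the hash XORed with the key length.  Each
 * level L then consumes bits [L*4 .. L*4+3] of the hash value.  Once
 * L*4 reaches 32 (at level 8), the hash is recomputed with the seed
 * incremented to 1 and the bit offset wraps: level 8 uses bits 0..3 of
 * murmurhash3(key, len, 1), level 9 uses bits 4..7, and so on.
 */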

/*
 * Instead of raw pointers, we use offsets from the base address.
 * This accommodates the use of this data structure in shared memory,
 * where mappings can be in different address spaces.
 *
 * The pointers must be aligned, since pointer tagging is used to
 * differentiate the intermediate nodes from leaves.  We reserve the
 * least significant bit.  An illustrative tagging example follows
 * the macros below.
 */
typedef uintptr_t thmap_ptr_t;
typedef uintptr_t atomic_thmap_ptr_t;		// C11 _Atomic

#define	THMAP_NULL		((thmap_ptr_t)0)

#define	THMAP_LEAF_BIT		(0x1)

#define	THMAP_ALIGNED_P(p)	(((uintptr_t)(p) & 3) == 0)
#define	THMAP_ALIGN(p)		((uintptr_t)(p) & ~(uintptr_t)3)
#define	THMAP_INODE_P(p)	(((uintptr_t)(p) & THMAP_LEAF_BIT) == 0)

#define	THMAP_GETPTR(th, p)	((void *)((th)->baseptr + (uintptr_t)(p)))
#define	THMAP_GETOFF(th, p)	((thmap_ptr_t)((uintptr_t)(p) - (th)->baseptr))
#define	THMAP_NODE(th, p)	THMAP_GETPTR(th, THMAP_ALIGN(p))
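
/*
 * For illustration: this mirrors how node_insert() tags a leaf offset
 * before publishing it, and how get_leaf() untags it on the way back:
 *
 *	thmap_ptr_t p = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
 *	...
 *	if (!THMAP_INODE_P(p)) {
 *		// it is a leaf; THMAP_ALIGN() clears the tag bit
 *		thmap_leaf_t *leaf = THMAP_NODE(thmap, p);
 *	}
 */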

/*
 * State field.
 */

#define	NODE_LOCKED	(1U << 31)		// lock (writers)
#define	NODE_DELETED	(1U << 30)		// node deleted
#define	NODE_COUNT(s)	((s) & 0x3fffffff)	// slot count mask
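
/*
 * The 32-bit state word, per the definitions above, is laid out as:
 *
 *	bit 31		NODE_LOCKED	-- writer spin-lock
 *	bit 30		NODE_DELETED	-- node is logically deleted
 *	bits 0..29	slot count	-- number of occupied slots
 */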

/*
 * There are two types of nodes:
 * - Intermediate nodes -- arrays pointing to another level or a leaf;
 * - Leaves, which store a key-value pair.
 */

typedef struct {
	uint32_t	state;		// C11 _Atomic
	thmap_ptr_t	parent;
	atomic_thmap_ptr_t slots[LEVEL_SIZE];
} thmap_inode_t;

#define	THMAP_INODE_LEN	sizeof(thmap_inode_t)

typedef struct {
	thmap_ptr_t	key;
	size_t		len;
	void *		val;
} thmap_leaf_t;

typedef struct {
	unsigned	rslot;		// root-level slot index
	unsigned	level;		// current level in the tree
	unsigned	hashidx;	// current hash index (block of bits)
	uint32_t	hashval;	// current hash value
} thmap_query_t;

typedef struct {
	uintptr_t	addr;
	size_t		len;
	void *		next;
} thmap_gc_t;

#define	THMAP_ROOT_LEN	(sizeof(thmap_ptr_t) * ROOT_SIZE)

struct thmap {
	uintptr_t	baseptr;
	atomic_thmap_ptr_t *root;
	unsigned	flags;
	const thmap_ops_t *ops;
	thmap_gc_t *	gc_list;	// C11 _Atomic
};

static void	stage_mem_gc(thmap_t *, uintptr_t, size_t);

/*
 * A few low-level helper routines.
 */

static uintptr_t
alloc_wrapper(size_t len)
{
	return (uintptr_t)kmem_intr_alloc(len, KM_NOSLEEP);
}

static void
free_wrapper(uintptr_t addr, size_t len)
{
	kmem_intr_free((void *)addr, len);
}

static const thmap_ops_t thmap_default_ops = {
	.alloc = alloc_wrapper,
	.free = free_wrapper
};

/*
 * NODE LOCKING.
 */

#ifdef DIAGNOSTIC
static inline bool
node_locked_p(thmap_inode_t *node)
{
	return (atomic_load_relaxed(&node->state) & NODE_LOCKED) != 0;
}
#endif

static void
lock_node(thmap_inode_t *node)
{
	unsigned bcount = SPINLOCK_BACKOFF_MIN;
	uint32_t s;
again:
	s = atomic_load_relaxed(&node->state);
	if (s & NODE_LOCKED) {
		SPINLOCK_BACKOFF(bcount);
		goto again;
	}
	/* Acquire from prior release in unlock_node(). */
	if (!atomic_compare_exchange_weak_explicit_32(&node->state,
	    &s, s | NODE_LOCKED, memory_order_acquire, memory_order_relaxed)) {
		bcount = SPINLOCK_BACKOFF_MIN;
		goto again;
	}
}

static void
unlock_node(thmap_inode_t *node)
{
	uint32_t s = atomic_load_relaxed(&node->state) & ~NODE_LOCKED;

	ASSERT(node_locked_p(node));
	/* Release to subsequent acquire in lock_node(). */
	atomic_store_release(&node->state, s);
}

/*
 * HASH VALUE AND KEY OPERATIONS.
 */

static inline void
hashval_init(thmap_query_t *query, const void * restrict key, size_t len)
{
	const uint32_t hashval = murmurhash3(key, len, 0);

	query->rslot = ((hashval >> ROOT_MSBITS) ^ len) & ROOT_MASK;
	query->level = 0;
	query->hashval = hashval;
	query->hashidx = 0;
}

/*
 * hashval_getslot: given the key, compute the hash (if not already cached)
 * and return the offset for the current level.
 */
static unsigned
hashval_getslot(thmap_query_t *query, const void * restrict key, size_t len)
{
	const unsigned offset = query->level * LEVEL_BITS;
	const unsigned shift = offset & HASHVAL_MOD;
	const unsigned i = offset >> HASHVAL_SHIFT;

	if (query->hashidx != i) {
		/* Generate a hash value for a required range. */
		query->hashval = murmurhash3(key, len, i);
		query->hashidx = i;
	}
	return (query->hashval >> shift) & LEVEL_MASK;
}

static unsigned
hashval_getleafslot(const thmap_t *thmap,
    const thmap_leaf_t *leaf, unsigned level)
{
	const void *key = THMAP_GETPTR(thmap, leaf->key);
	const unsigned offset = level * LEVEL_BITS;
	const unsigned shift = offset & HASHVAL_MOD;
	const unsigned i = offset >> HASHVAL_SHIFT;

	return (murmurhash3(key, leaf->len, i) >> shift) & LEVEL_MASK;
}

static inline unsigned
hashval_getl0slot(const thmap_t *thmap, const thmap_query_t *query,
    const thmap_leaf_t *leaf)
{
	if (__predict_true(query->hashidx == 0)) {
		return query->hashval & LEVEL_MASK;
	}
	return hashval_getleafslot(thmap, leaf, 0);
}

static bool
key_cmp_p(const thmap_t *thmap, const thmap_leaf_t *leaf,
    const void * restrict key, size_t len)
{
	const void *leafkey = THMAP_GETPTR(thmap, leaf->key);
	return len == leaf->len && memcmp(key, leafkey, len) == 0;
}

/*
 * INTER-NODE OPERATIONS.
 */

static thmap_inode_t *
node_create(thmap_t *thmap, thmap_inode_t *parent)
{
	thmap_inode_t *node;
	uintptr_t p;

	p = thmap->ops->alloc(THMAP_INODE_LEN);
	if (!p) {
		return NULL;
	}
	node = THMAP_GETPTR(thmap, p);
	ASSERT(THMAP_ALIGNED_P(node));

	memset(node, 0, THMAP_INODE_LEN);
	if (parent) {
		/* Not yet published, no need for ordering. */
		atomic_store_relaxed(&node->state, NODE_LOCKED);
		node->parent = THMAP_GETOFF(thmap, parent);
	}
	return node;
}

static void
node_insert(thmap_inode_t *node, unsigned slot, thmap_ptr_t child)
{
	ASSERT(node_locked_p(node) || node->parent == THMAP_NULL);
	ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);
	ASSERT(atomic_load_relaxed(&node->slots[slot]) == THMAP_NULL);

	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) < LEVEL_SIZE);

	/*
	 * If node is public already, caller is responsible for issuing
	 * release fence; if node is not public, no ordering is needed.
	 * Hence relaxed ordering.
	 */
	atomic_store_relaxed(&node->slots[slot], child);
	atomic_store_relaxed(&node->state,
	    atomic_load_relaxed(&node->state) + 1);
}

static void
node_remove(thmap_inode_t *node, unsigned slot)
{
	ASSERT(node_locked_p(node));
	ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);
	ASSERT(atomic_load_relaxed(&node->slots[slot]) != THMAP_NULL);

	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) > 0);
	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) <= LEVEL_SIZE);

	/* Element will be GC-ed later; no need for ordering here. */
	atomic_store_relaxed(&node->slots[slot], THMAP_NULL);
	atomic_store_relaxed(&node->state,
	    atomic_load_relaxed(&node->state) - 1);
}

/*
 * LEAF OPERATIONS.
 */

static thmap_leaf_t *
leaf_create(const thmap_t *thmap, const void *key, size_t len, void *val)
{
	thmap_leaf_t *leaf;
	uintptr_t leaf_off, key_off;

	leaf_off = thmap->ops->alloc(sizeof(thmap_leaf_t));
	if (!leaf_off) {
		return NULL;
	}
	leaf = THMAP_GETPTR(thmap, leaf_off);
	ASSERT(THMAP_ALIGNED_P(leaf));

	if ((thmap->flags & THMAP_NOCOPY) == 0) {
		/*
		 * Copy the key.
		 */
		key_off = thmap->ops->alloc(len);
		if (!key_off) {
			thmap->ops->free(leaf_off, sizeof(thmap_leaf_t));
			return NULL;
		}
		memcpy(THMAP_GETPTR(thmap, key_off), key, len);
		leaf->key = key_off;
	} else {
		/* Otherwise, we use a reference. */
		leaf->key = (uintptr_t)key;
	}
	leaf->len = len;
	leaf->val = val;
	return leaf;
}

static void
leaf_free(const thmap_t *thmap, thmap_leaf_t *leaf)
{
	if ((thmap->flags & THMAP_NOCOPY) == 0) {
		thmap->ops->free(leaf->key, leaf->len);
	}
	thmap->ops->free(THMAP_GETOFF(thmap, leaf), sizeof(thmap_leaf_t));
}

static thmap_leaf_t *
get_leaf(const thmap_t *thmap, thmap_inode_t *parent, unsigned slot)
{
	thmap_ptr_t node;

	/* Consume from prior release in thmap_put(). */
	node = atomic_load_consume(&parent->slots[slot]);
	if (THMAP_INODE_P(node)) {
		return NULL;
	}
	return THMAP_NODE(thmap, node);
}

/*
 * ROOT OPERATIONS.
 */

/*
 * root_try_put: try to set a root pointer at query->rslot.
 *
 * => Implies release operation on success.
 * => Implies no ordering on failure.
 */
static inline bool
root_try_put(thmap_t *thmap, const thmap_query_t *query, thmap_leaf_t *leaf)
{
	thmap_ptr_t expected;
	const unsigned i = query->rslot;
	thmap_inode_t *node;
	thmap_ptr_t nptr;
	unsigned slot;

	/*
	 * Must pre-check first.  No ordering required because we will
	 * check again before taking any actions, and start over if
	 * this changes from null.
	 */
	if (atomic_load_relaxed(&thmap->root[i])) {
		return false;
	}

	/*
	 * Create an intermediate node.  Since there is no parent set,
	 * it will be created unlocked and the CAS operation will
	 * release it to readers.
	 */
	node = node_create(thmap, NULL);
	slot = hashval_getl0slot(thmap, query, leaf);
	node_insert(node, slot, THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT);
	nptr = THMAP_GETOFF(thmap, node);
again:
	if (atomic_load_relaxed(&thmap->root[i])) {
		thmap->ops->free(nptr, THMAP_INODE_LEN);
		return false;
	}
	/* Release to subsequent consume in find_edge_node(). */
	expected = THMAP_NULL;
	if (!atomic_compare_exchange_weak_explicit_ptr(&thmap->root[i],
	    &expected, nptr, memory_order_release, memory_order_relaxed)) {
		goto again;
	}
	return true;
}

/*
 * find_edge_node: given the hash, traverse the tree to find the edge node.
 *
 * => Returns an aligned (clean) pointer to the parent node.
 * => Returns the slot number and sets current level.
 */
static thmap_inode_t *
find_edge_node(const thmap_t *thmap, thmap_query_t *query,
    const void * restrict key, size_t len, unsigned *slot)
{
	thmap_ptr_t root_slot;
	thmap_inode_t *parent;
	thmap_ptr_t node;
	unsigned off;

	ASSERT(query->level == 0);

	/* Consume from prior release in root_try_put(). */
	root_slot = atomic_load_consume(&thmap->root[query->rslot]);
	parent = THMAP_NODE(thmap, root_slot);
	if (!parent) {
		return NULL;
	}
descend:
	off = hashval_getslot(query, key, len);
	/* Consume from prior release in thmap_put(). */
	node = atomic_load_consume(&parent->slots[off]);

	/* Descend the tree until we find a leaf or an empty slot. */
	if (node && THMAP_INODE_P(node)) {
		parent = THMAP_NODE(thmap, node);
		query->level++;
		goto descend;
	}
	/*
	 * NODE_DELETED does not become stale until GC runs, which
	 * cannot happen while we are in the middle of an operation,
	 * hence relaxed ordering.
	 */
	if (atomic_load_relaxed(&parent->state) & NODE_DELETED) {
		return NULL;
	}
	*slot = off;
	return parent;
}

/*
 * find_edge_node_locked: traverse the tree, like find_edge_node(),
 * but attempt to lock the edge node.
 *
 * => Returns NULL if a deleted node is found.  This indicates that
 *    the caller must re-try from the root, as the root slot might have
 *    changed too.
 */
static thmap_inode_t *
find_edge_node_locked(const thmap_t *thmap, thmap_query_t *query,
    const void * restrict key, size_t len, unsigned *slot)
{
	thmap_inode_t *node;
	thmap_ptr_t target;
retry:
	/*
	 * Find the edge node and lock it!  Re-check the state since
	 * the tree might change by the time we acquire the lock.
	 */
	node = find_edge_node(thmap, query, key, len, slot);
	if (!node) {
		/* The root slot is empty -- let the caller decide. */
		query->level = 0;
		return NULL;
	}
	lock_node(node);
	if (__predict_false(atomic_load_relaxed(&node->state) & NODE_DELETED)) {
		/*
		 * The node has been deleted.  The tree might have a new
		 * shape now, therefore we must re-start from the root.
		 */
		unlock_node(node);
		query->level = 0;
		return NULL;
	}
	target = atomic_load_relaxed(&node->slots[*slot]);
	if (__predict_false(target && THMAP_INODE_P(target))) {
		/*
		 * The target slot has been changed and it is now an
		 * intermediate node.  Re-start from the top internode.
		 */
		unlock_node(node);
		query->level = 0;
		goto retry;
	}
	return node;
}

/*
 * thmap_get: lookup a value given the key.
 */
void *
thmap_get(thmap_t *thmap, const void *key, size_t len)
{
	thmap_query_t query;
	thmap_inode_t *parent;
	thmap_leaf_t *leaf;
	unsigned slot;

	hashval_init(&query, key, len);
	parent = find_edge_node(thmap, &query, key, len, &slot);
	if (!parent) {
		return NULL;
	}
	leaf = get_leaf(thmap, parent, slot);
	if (!leaf) {
		return NULL;
	}
	if (!key_cmp_p(thmap, leaf, key, len)) {
		return NULL;
	}
	return leaf->val;
}

/*
 * thmap_put: insert a value given the key.
 *
 * => If the key is already present, return the associated value.
 * => Otherwise, on successful insert, return the given value.
 */
void *
thmap_put(thmap_t *thmap, const void *key, size_t len, void *val)
{
	thmap_query_t query;
	thmap_leaf_t *leaf, *other;
	thmap_inode_t *parent, *child;
	unsigned slot, other_slot;
	thmap_ptr_t target;

	/*
	 * First, pre-allocate and initialize the leaf node.
	 */
	leaf = leaf_create(thmap, key, len, val);
	if (__predict_false(!leaf)) {
		return NULL;
	}
	hashval_init(&query, key, len);
retry:
	/*
	 * Try to insert into the root first, if its slot is empty.
	 */
	if (root_try_put(thmap, &query, leaf)) {
		/* Success: the leaf was inserted; no locking involved. */
		return val;
	}

	/*
	 * Release node via store in node_insert (*) to subsequent
	 * consume in get_leaf() or find_edge_node().
	 */
	atomic_thread_fence(memory_order_release);

	/*
	 * Find the edge node and the target slot.
	 */
	parent = find_edge_node_locked(thmap, &query, key, len, &slot);
	if (!parent) {
		goto retry;
	}
	target = atomic_load_relaxed(&parent->slots[slot]);	// tagged offset
	if (THMAP_INODE_P(target)) {
		/*
		 * Empty slot: simply insert the new leaf.  The release
		 * fence is already issued for us.
		 */
		target = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
		node_insert(parent, slot, target); /* (*) */
		goto out;
	}

	/*
	 * Collision or duplicate.
	 */
	other = THMAP_NODE(thmap, target);
	if (key_cmp_p(thmap, other, key, len)) {
		/*
		 * Duplicate.  Free the pre-allocated leaf and
		 * return the present value.
		 */
		leaf_free(thmap, leaf);
		val = other->val;
		goto out;
	}
descend:
	/*
	 * Collision -- expand the tree.  Create an intermediate node
	 * which will be locked (NODE_LOCKED) for us.  At this point,
	 * we advance to the next level.
	 */
	child = node_create(thmap, parent);
	if (__predict_false(!child)) {
		leaf_free(thmap, leaf);
		val = NULL;
		goto out;
	}
	query.level++;

	/*
	 * Insert the other (colliding) leaf first.  The new child is
	 * not yet published, so memory order is relaxed.
	 */
	other_slot = hashval_getleafslot(thmap, other, query.level);
	target = THMAP_GETOFF(thmap, other) | THMAP_LEAF_BIT;
	node_insert(child, other_slot, target);

	/*
	 * Insert the intermediate node into the parent node.
	 * It becomes the new parent for our new leaf.
	 *
	 * Ensure that stores to the child (and leaf) reach global
	 * visibility before it gets inserted to the parent, as
	 * consumed by get_leaf() or find_edge_node().
	 */
	atomic_store_release(&parent->slots[slot], THMAP_GETOFF(thmap, child));

	unlock_node(parent);
	ASSERT(node_locked_p(child));
	parent = child;

	/*
	 * Get the new slot and check for another collision
	 * at the next level.
	 */
	slot = hashval_getslot(&query, key, len);
	if (slot == other_slot) {
		/* Another collision -- descend and expand again. */
		goto descend;
	}

	/*
	 * Insert our new leaf once we expanded enough.  The release
	 * fence is already issued for us.
	 */
	target = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
	node_insert(parent, slot, target); /* (*) */
out:
	unlock_node(parent);
	return val;
}
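
/*
 * Illustrative example of the thmap_put() return convention (a sketch,
 * not part of this file):
 *
 *	void *ret = thmap_put(map, key, len, val);
 *	if (ret != val) {
 *		// either the key was already present (ret is the
 *		// existing value) or an allocation failed (ret is
 *		// NULL); our val was not inserted.
 *	}
 */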

/*
 * thmap_del: remove the entry given the key.
 */
void *
thmap_del(thmap_t *thmap, const void *key, size_t len)
{
	thmap_query_t query;
	thmap_leaf_t *leaf;
	thmap_inode_t *parent;
	unsigned slot;
	void *val;

	hashval_init(&query, key, len);
	parent = find_edge_node_locked(thmap, &query, key, len, &slot);
	if (!parent) {
		/* Root slot empty: not found. */
		return NULL;
	}
	leaf = get_leaf(thmap, parent, slot);
	if (!leaf || !key_cmp_p(thmap, leaf, key, len)) {
		/* Not found. */
		unlock_node(parent);
		return NULL;
	}

	/* Remove the leaf. */
	ASSERT(THMAP_NODE(thmap, atomic_load_relaxed(&parent->slots[slot]))
	    == leaf);
	node_remove(parent, slot);

	/*
	 * Collapse the levels if removing the last item.
	 */
	while (query.level &&
	    NODE_COUNT(atomic_load_relaxed(&parent->state)) == 0) {
		thmap_inode_t *node = parent;

		ASSERT(atomic_load_relaxed(&node->state) == NODE_LOCKED);

		/*
		 * Ascend one level up.
		 * => Mark our current parent as deleted.
		 * => Lock the parent one level up.
		 */
		query.level--;
		slot = hashval_getslot(&query, key, len);
		parent = THMAP_NODE(thmap, node->parent);
		ASSERT(parent != NULL);

		lock_node(parent);
		ASSERT((atomic_load_relaxed(&parent->state) & NODE_DELETED)
		    == 0);

		/*
		 * The lock is exclusive, so nobody else can be writing
		 * at the same time and no atomic R/M/W is needed, but
		 * readers may read without the lock and so need atomic
		 * load/store.  No ordering is needed here because the
		 * entry itself stays valid until GC.
		 */
		atomic_store_relaxed(&node->state,
		    atomic_load_relaxed(&node->state) | NODE_DELETED);
		unlock_node(node);	// memory_order_release

		ASSERT(THMAP_NODE(thmap,
		    atomic_load_relaxed(&parent->slots[slot])) == node);
		node_remove(parent, slot);

		/* Stage the removed node for G/C. */
		stage_mem_gc(thmap, THMAP_GETOFF(thmap, node), THMAP_INODE_LEN);
	}

	/*
	 * If the top node is empty, then we need to remove it from the
	 * root level.  Mark the node as deleted and clear the slot.
	 *
	 * Note: acquiring the lock on the top node effectively prevents
	 * the root slot from changing.
	 */
	if (NODE_COUNT(atomic_load_relaxed(&parent->state)) == 0) {
		const unsigned rslot = query.rslot;
		const thmap_ptr_t nptr =
		    atomic_load_relaxed(&thmap->root[rslot]);

		ASSERT(query.level == 0);
		ASSERT(parent->parent == THMAP_NULL);
		ASSERT(THMAP_GETOFF(thmap, parent) == nptr);

		/* Mark as deleted and remove from the root-level slot. */
		atomic_store_relaxed(&parent->state,
		    atomic_load_relaxed(&parent->state) | NODE_DELETED);
		atomic_store_relaxed(&thmap->root[rslot], THMAP_NULL);

		stage_mem_gc(thmap, nptr, THMAP_INODE_LEN);
	}
	unlock_node(parent);

	/*
	 * Save the value and stage the leaf for G/C.
	 */
	val = leaf->val;
	if ((thmap->flags & THMAP_NOCOPY) == 0) {
		stage_mem_gc(thmap, leaf->key, leaf->len);
	}
	stage_mem_gc(thmap, THMAP_GETOFF(thmap, leaf), sizeof(thmap_leaf_t));
	return val;
}

/*
 * G/C routines.
 */

static void
stage_mem_gc(thmap_t *thmap, uintptr_t addr, size_t len)
{
	thmap_gc_t *head, *gc;

	gc = kmem_intr_alloc(sizeof(thmap_gc_t), KM_NOSLEEP);
	gc->addr = addr;
	gc->len = len;
retry:
	head = atomic_load_relaxed(&thmap->gc_list);
	gc->next = head;	// not yet published

	/* Release to subsequent acquire in thmap_stage_gc(). */
	if (!atomic_compare_exchange_weak_explicit_ptr(&thmap->gc_list,
	    &head, gc, memory_order_release, memory_order_relaxed)) {
		goto retry;
	}
}

void *
thmap_stage_gc(thmap_t *thmap)
{
	/* Acquire from prior release in stage_mem_gc(). */
	return atomic_exchange_explicit(&thmap->gc_list, NULL,
	    memory_order_acquire);
}

void
thmap_gc(thmap_t *thmap, void *ref)
{
	thmap_gc_t *gc = ref;

	while (gc) {
		thmap_gc_t *next = gc->next;
		thmap->ops->free(gc->addr, gc->len);
		kmem_intr_free(gc, sizeof(thmap_gc_t));
		gc = next;
	}
}
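
/*
 * Illustrative reclamation protocol (a sketch; the grace-period
 * mechanism, e.g. epoch-based reclamation, is the caller's choice):
 *
 *	val = thmap_del(map, key, len);	// memory is only staged
 *	ref = thmap_stage_gc(map);	// atomically take the staged list
 *	// ... wait until concurrent readers can no longer hold
 *	// references to the deleted entries (grace period) ...
 *	thmap_gc(map, ref);		// now actually free the memory
 */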

/*
 * thmap_create: construct a new trie-hash map object.
 */
thmap_t *
thmap_create(uintptr_t baseptr, const thmap_ops_t *ops, unsigned flags)
{
	thmap_t *thmap;
	uintptr_t root;

	/*
	 * Setup the map object.
	 */
	if (!THMAP_ALIGNED_P(baseptr)) {
		return NULL;
	}
	thmap = kmem_zalloc(sizeof(thmap_t), KM_SLEEP);
	if (!thmap) {
		return NULL;
	}
	thmap->baseptr = baseptr;
	thmap->ops = ops ? ops : &thmap_default_ops;
	thmap->flags = flags;

	if ((thmap->flags & THMAP_SETROOT) == 0) {
		/* Allocate the root level. */
		root = thmap->ops->alloc(THMAP_ROOT_LEN);
		thmap->root = THMAP_GETPTR(thmap, root);
		if (!thmap->root) {
			kmem_free(thmap, sizeof(thmap_t));
			return NULL;
		}
		memset(thmap->root, 0, THMAP_ROOT_LEN);
		atomic_thread_fence(memory_order_release); /* XXX */
	}
	return thmap;
}

int
thmap_setroot(thmap_t *thmap, uintptr_t root_off)
{
	if (thmap->root) {
		return -1;
	}
	thmap->root = THMAP_GETPTR(thmap, root_off);
	atomic_thread_fence(memory_order_release); /* XXX */
	return 0;
}

uintptr_t
thmap_getroot(const thmap_t *thmap)
{
	return THMAP_GETOFF(thmap, thmap->root);
}

void
thmap_destroy(thmap_t *thmap)
{
	uintptr_t root = THMAP_GETOFF(thmap, thmap->root);
	void *ref;

	ref = thmap_stage_gc(thmap);
	thmap_gc(thmap, ref);

	if ((thmap->flags & THMAP_SETROOT) == 0) {
		thmap->ops->free(root, THMAP_ROOT_LEN);
	}
	kmem_free(thmap, sizeof(thmap_t));
}