      1  1.5.6.2    martin /*	$NetBSD: subr_thmap.c,v 1.5.6.2 2023/10/18 15:07:06 martin Exp $	*/
      2      1.2  christos 
      3      1.1     rmind /*-
      4      1.1     rmind  * Copyright (c) 2018 Mindaugas Rasiukevicius <rmind at noxt eu>
      5      1.1     rmind  * All rights reserved.
      6      1.1     rmind  *
      7      1.1     rmind  * Redistribution and use in source and binary forms, with or without
      8      1.1     rmind  * modification, are permitted provided that the following conditions
      9      1.1     rmind  * are met:
     10      1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     11      1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     12      1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     13      1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     14      1.1     rmind  *    documentation and/or other materials provided with the distribution.
     15      1.1     rmind  *
     16      1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     17      1.1     rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     18      1.1     rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     19      1.1     rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     20      1.1     rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21      1.1     rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     22      1.1     rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     23      1.1     rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     24      1.1     rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25      1.1     rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26      1.1     rmind  * SUCH DAMAGE.
     27      1.1     rmind  *
     28      1.1     rmind  * Upstream: https://github.com/rmind/thmap/
     29      1.1     rmind  */
     30      1.1     rmind 
     31      1.1     rmind /*
     32      1.1     rmind  * Concurrent trie-hash map.
     33      1.1     rmind  *
     34      1.1     rmind  * The data structure is conceptually a radix trie on hashed keys.
     35      1.1     rmind  * Keys are hashed using a 32-bit function.  The root level is a special
     36      1.1     rmind  * case: it is managed using the compare-and-swap (CAS) atomic operation
     37      1.1     rmind  * and has a fanout of 64.  The subsequent levels are constructed using
     38      1.1     rmind  * intermediate nodes with a fanout of 16 (using 4 bits).  As more levels
     39      1.1     rmind  * are created, more blocks of the 32-bit hash value might be generated
     40      1.1     rmind  * by incrementing the seed parameter of the hash function.
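 *
 *   For illustration (with an arbitrary example value): if the 32-bit
 *   hash of a 3-byte key is 0xc0de1234, then the root slot is
 *   ((0xc0de1234 >> 26) ^ 3) & 0x3f == 51, level 0 consumes bits 0..3
 *   (slot 0x4), level 1 consumes bits 4..7 (slot 0x3), and so on;
 *   after eight levels have consumed all 32 bits, the key is re-hashed
 *   with the seed set to the next hash-block index.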
     41      1.1     rmind  *
     42      1.1     rmind  * Concurrency
     43      1.1     rmind  *
     44      1.1     rmind  * - READERS: Descending is simply walking through the slot values of
     45      1.1     rmind  *   the intermediate nodes.  It is lock-free as there is no intermediate
     46      1.1     rmind  *   state: the slot is either empty or has a pointer to the child node.
     47      1.1     rmind  *   The main assumptions here are the following:
     48      1.1     rmind  *
      49      1.1     rmind  *   i) modifications must preserve consistency with respect to the
      50      1.1     rmind  *   readers, i.e. the readers can only see valid node values;
     51      1.1     rmind  *
     52      1.1     rmind  *   ii) any invalid view must "fail" the reads, e.g. by making them
      53      1.1     rmind  *   re-try from the root; this is the case for deletions and is achieved
     54      1.1     rmind  *   using the NODE_DELETED flag.
     55      1.1     rmind  *
      56  1.5.6.1    martin  *   iii) node destruction must be synchronized with the readers, e.g.
      57      1.1     rmind  *   by using epoch-based reclamation or similar (see the sketch below).
     58      1.1     rmind  *
     59      1.1     rmind  * - WRITERS AND LOCKING: Each intermediate node has a spin-lock (which
     60      1.1     rmind  *   is implemented using the NODE_LOCKED bit) -- it provides mutual
     61      1.1     rmind  *   exclusion amongst concurrent writers.  The lock order for the nodes
      62      1.1     rmind  *   is "bottom-up", i.e. they are locked as we ascend the trie.  A key
      63      1.1     rmind  *   constraint here is that the parent pointer never changes.
     64      1.1     rmind  *
     65      1.1     rmind  * - DELETES: In addition to writer's locking, the deletion keeps the
     66      1.1     rmind  *   intermediate nodes in a valid state and sets the NODE_DELETED flag,
     67      1.1     rmind  *   to indicate that the readers must re-start the walk from the root.
     68      1.1     rmind  *   As the levels are collapsed, NODE_DELETED gets propagated up-tree.
     69      1.1     rmind  *   The leaf nodes just stay as-is until they are reclaimed.
     70      1.1     rmind  *
     71      1.1     rmind  * - ROOT LEVEL: The root level is a special case, as it is implemented
      72      1.1     rmind  *   as an array (rather than an intermediate node).  The root-level slot can
     73      1.1     rmind  *   only be set using CAS and it can only be set to a valid intermediate
     74      1.1     rmind  *   node.  The root-level slot can only be cleared when the node it points
     75      1.1     rmind  *   at becomes empty, is locked and marked as NODE_DELETED (this causes
     76      1.1     rmind  *   the insert/delete operations to re-try until the slot is set to NULL).
     77      1.1     rmind  *
     78      1.1     rmind  * References:
     79      1.1     rmind  *
     80      1.1     rmind  *	W. Litwin, 1981, Trie Hashing.
     81      1.1     rmind  *	Proceedings of the 1981 ACM SIGMOD, p. 19-29
     82      1.1     rmind  *	https://dl.acm.org/citation.cfm?id=582322
     83      1.1     rmind  *
     84      1.1     rmind  *	P. L. Lehman and S. B. Yao.
     85      1.1     rmind  *	Efficient locking for concurrent operations on B-trees.
     86      1.1     rmind  *	ACM TODS, 6(4):650-670, 1981
     87      1.1     rmind  *	https://www.csd.uoc.gr/~hy460/pdf/p650-lehman.pdf
     88      1.1     rmind  */
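
/*
 * Illustrative usage sketch (not part of the implementation).  The
 * reader-side synchronization is the caller's responsibility, as
 * described above; the read-section markers below stand for whatever
 * mechanism (e.g. EBR or passive serialization) the caller uses.
 *
 *	thmap_t *map = thmap_create(0, NULL, 0);
 *	(void)thmap_put(map, key, keylen, val);
 *
 *	// reader, inside a read section:
 *	void *v = thmap_get(map, key, keylen);
 *
 * Memory staged by thmap_del()/thmap_stage_gc() may only be reclaimed
 * with thmap_gc() once such readers have drained.
 */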
     89      1.1     rmind 
     90      1.1     rmind #ifdef _KERNEL
     91      1.1     rmind #include <sys/cdefs.h>
     92      1.1     rmind #include <sys/param.h>
     93      1.1     rmind #include <sys/types.h>
     94      1.1     rmind #include <sys/thmap.h>
     95      1.1     rmind #include <sys/kmem.h>
     96      1.1     rmind #include <sys/lock.h>
     97      1.1     rmind #include <sys/atomic.h>
     98      1.1     rmind #include <sys/hash.h>
     99      1.2  christos #define THMAP_RCSID(a) __KERNEL_RCSID(0, a)
    100      1.1     rmind #else
    101      1.1     rmind #include <stdio.h>
    102      1.1     rmind #include <stdlib.h>
    103      1.1     rmind #include <stdbool.h>
    104      1.1     rmind #include <stddef.h>
    105      1.1     rmind #include <inttypes.h>
    106      1.1     rmind #include <string.h>
    107      1.1     rmind #include <limits.h>
    108      1.2  christos #define THMAP_RCSID(a) __RCSID(a)
    109      1.1     rmind 
    110      1.1     rmind #include "thmap.h"
    111      1.1     rmind #include "utils.h"
    112      1.1     rmind #endif
    113      1.1     rmind 
    114  1.5.6.2    martin THMAP_RCSID("$NetBSD: subr_thmap.c,v 1.5.6.2 2023/10/18 15:07:06 martin Exp $");
    115      1.2  christos 
    116      1.1     rmind /*
    117      1.1     rmind  * NetBSD kernel wrappers
    118      1.1     rmind  */
    119      1.1     rmind #ifdef _KERNEL
    120      1.1     rmind #define	ASSERT KASSERT
    121  1.5.6.1    martin #define	atomic_thread_fence(x) membar_sync()
    122  1.5.6.1    martin #define	atomic_compare_exchange_weak_explicit_32(p, e, n, m1, m2) \
    123  1.5.6.1    martin     (atomic_cas_32((p), *(e), (n)) == *(e))
    124  1.5.6.1    martin #define	atomic_compare_exchange_weak_explicit_ptr(p, e, n, m1, m2) \
    125  1.5.6.1    martin     (atomic_cas_ptr((p), *(void **)(e), (void *)(n)) == *(void **)(e))
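/*
 * Note: unlike true C11 compare-exchange, these wrappers do not write
 * the observed value back into *(e) on failure; the callers below
 * simply reload and retry instead.
 */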
    126  1.5.6.1    martin #define	atomic_exchange_explicit(o, n, m1) atomic_swap_ptr((o), (n))
    127      1.1     rmind #define	murmurhash3 murmurhash2
    128      1.1     rmind #endif
    129      1.1     rmind 
    130      1.1     rmind /*
     131      1.1     rmind  * The root level fanout is 64 (indexed by the top 6 bits of the hash
     132      1.1     rmind  * value XORed with the key length).  Each subsequent level, represented by
    133      1.1     rmind  * intermediate nodes, has a fanout of 16 (using 4 bits).
    134      1.1     rmind  *
    135      1.1     rmind  * The hash function produces 32-bit values.
    136      1.1     rmind  */
    137      1.1     rmind 
    138      1.1     rmind #define	HASHVAL_BITS	(32)
    139      1.1     rmind #define	HASHVAL_MOD	(HASHVAL_BITS - 1)
    140      1.1     rmind #define	HASHVAL_SHIFT	(5)
    141      1.1     rmind 
    142      1.1     rmind #define	ROOT_BITS	(6)
    143      1.1     rmind #define	ROOT_SIZE	(1 << ROOT_BITS)
    144      1.1     rmind #define	ROOT_MASK	(ROOT_SIZE - 1)
    145      1.1     rmind #define	ROOT_MSBITS	(HASHVAL_BITS - ROOT_BITS)
    146      1.1     rmind 
    147      1.1     rmind #define	LEVEL_BITS	(4)
    148      1.1     rmind #define	LEVEL_SIZE	(1 << LEVEL_BITS)
    149      1.1     rmind #define	LEVEL_MASK	(LEVEL_SIZE - 1)
    150      1.1     rmind 
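/*
 * Note: with LEVEL_BITS of 4, eight levels (32 / LEVEL_BITS) consume
 * one 32-bit hash value; HASHVAL_SHIFT is 5 because each hash block
 * provides 2^5 = 32 bits (see hashval_getslot()).
 */
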
    151      1.1     rmind /*
    152      1.1     rmind  * Instead of raw pointers, we use offsets from the base address.
    153      1.1     rmind  * This accommodates the use of this data structure in shared memory,
    154      1.1     rmind  * where mappings can be in different address spaces.
    155      1.1     rmind  *
    156      1.1     rmind  * The pointers must be aligned, since pointer tagging is used to
    157      1.1     rmind  * differentiate the intermediate nodes from leaves.  We reserve the
    158      1.1     rmind  * least significant bit.
    159      1.1     rmind  */
    160      1.1     rmind typedef uintptr_t thmap_ptr_t;
    161  1.5.6.1    martin typedef uintptr_t atomic_thmap_ptr_t;			// C11 _Atomic
    162      1.1     rmind 
    163      1.1     rmind #define	THMAP_NULL		((thmap_ptr_t)0)
    164      1.1     rmind 
    165      1.1     rmind #define	THMAP_LEAF_BIT		(0x1)
    166      1.1     rmind 
    167      1.1     rmind #define	THMAP_ALIGNED_P(p)	(((uintptr_t)(p) & 3) == 0)
    168      1.1     rmind #define	THMAP_ALIGN(p)		((uintptr_t)(p) & ~(uintptr_t)3)
    169      1.1     rmind #define	THMAP_INODE_P(p)	(((uintptr_t)(p) & THMAP_LEAF_BIT) == 0)
    170      1.1     rmind 
    171      1.1     rmind #define	THMAP_GETPTR(th, p)	((void *)((th)->baseptr + (uintptr_t)(p)))
    172      1.1     rmind #define	THMAP_GETOFF(th, p)	((thmap_ptr_t)((uintptr_t)(p) - (th)->baseptr))
    173      1.1     rmind #define	THMAP_NODE(th, p)	THMAP_GETPTR(th, THMAP_ALIGN(p))
    174      1.1     rmind 
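/*
 * For illustration: a leaf is stored in a slot as a tagged offset,
 * i.e. roughly:
 *
 *	slotval = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
 *	ASSERT(!THMAP_INODE_P(slotval));
 *	leaf = THMAP_NODE(thmap, slotval);	// strips the tag, rebases
 */
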
    175      1.1     rmind /*
    176      1.1     rmind  * State field.
    177      1.1     rmind  */
    178      1.1     rmind 
    179      1.1     rmind #define	NODE_LOCKED		(1U << 31)		// lock (writers)
    180      1.1     rmind #define	NODE_DELETED		(1U << 30)		// node deleted
    181      1.1     rmind #define	NODE_COUNT(s)		((s) & 0x3fffffff)	// slot count mask
    182      1.1     rmind 
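/*
 * For example, a node that is write-locked and has three occupied
 * slots has state == (NODE_LOCKED | 3) and NODE_COUNT(state) == 3.
 */
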
    183      1.1     rmind /*
    184      1.1     rmind  * There are two types of nodes:
    185      1.1     rmind  * - Intermediate nodes -- arrays pointing to another level or a leaf;
    186      1.1     rmind  * - Leaves, which store a key-value pair.
    187      1.1     rmind  */
    188      1.1     rmind 
    189      1.1     rmind typedef struct {
    190  1.5.6.1    martin 	uint32_t		state;			// C11 _Atomic
    191  1.5.6.1    martin 	thmap_ptr_t		parent;
    192  1.5.6.1    martin 	atomic_thmap_ptr_t	slots[LEVEL_SIZE];
    193      1.1     rmind } thmap_inode_t;
    194      1.1     rmind 
    195      1.1     rmind #define	THMAP_INODE_LEN	sizeof(thmap_inode_t)
    196      1.1     rmind 
    197      1.1     rmind typedef struct {
    198      1.1     rmind 	thmap_ptr_t	key;
    199      1.1     rmind 	size_t		len;
    200      1.1     rmind 	void *		val;
    201      1.1     rmind } thmap_leaf_t;
    202      1.1     rmind 
    203      1.1     rmind typedef struct {
    204      1.1     rmind 	unsigned	rslot;		// root-level slot index
    205      1.1     rmind 	unsigned	level;		// current level in the tree
    206      1.1     rmind 	unsigned	hashidx;	// current hash index (block of bits)
    207      1.1     rmind 	uint32_t	hashval;	// current hash value
    208      1.1     rmind } thmap_query_t;
    209      1.1     rmind 
    210  1.5.6.2    martin union thmap_align {
    211  1.5.6.2    martin 	void *		p;
    212  1.5.6.2    martin 	uint64_t	v;
    213  1.5.6.2    martin };
    214  1.5.6.2    martin 
    215  1.5.6.2    martin typedef struct thmap_gc thmap_gc_t;
    216  1.5.6.2    martin struct thmap_gc {
    217      1.1     rmind 	size_t		len;
    218  1.5.6.2    martin 	thmap_gc_t *	next;
    219  1.5.6.2    martin 	char		data[] __aligned(sizeof(union thmap_align));
    220  1.5.6.2    martin };
    221      1.1     rmind 
    222      1.1     rmind #define	THMAP_ROOT_LEN	(sizeof(thmap_ptr_t) * ROOT_SIZE)
    223      1.1     rmind 
    224      1.1     rmind struct thmap {
    225  1.5.6.1    martin 	uintptr_t		baseptr;
    226  1.5.6.1    martin 	atomic_thmap_ptr_t *	root;
    227  1.5.6.1    martin 	unsigned		flags;
    228  1.5.6.1    martin 	const thmap_ops_t *	ops;
    229  1.5.6.1    martin 	thmap_gc_t *		gc_list;		// C11 _Atomic
    230      1.1     rmind };
    231      1.1     rmind 
    232      1.1     rmind static void	stage_mem_gc(thmap_t *, uintptr_t, size_t);
    233      1.1     rmind 
    234      1.1     rmind /*
    235      1.1     rmind  * A few low-level helper routines.
    236      1.1     rmind  */
    237      1.1     rmind 
    238      1.1     rmind static uintptr_t
    239      1.1     rmind alloc_wrapper(size_t len)
    240      1.1     rmind {
    241      1.4     rmind 	return (uintptr_t)kmem_intr_alloc(len, KM_NOSLEEP);
    242      1.1     rmind }
    243      1.1     rmind 
    244      1.1     rmind static void
    245      1.1     rmind free_wrapper(uintptr_t addr, size_t len)
    246      1.1     rmind {
    247      1.1     rmind 	kmem_intr_free((void *)addr, len);
    248      1.1     rmind }
    249      1.1     rmind 
    250      1.1     rmind static const thmap_ops_t thmap_default_ops = {
    251      1.1     rmind 	.alloc = alloc_wrapper,
    252      1.1     rmind 	.free = free_wrapper
    253      1.1     rmind };
    254      1.1     rmind 
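/*
 * gc_alloc/gc_free: allocation wrappers which prepend a struct thmap_gc
 * header to each chunk and record its length there.  Callers get the
 * offset of the data[] area; the header is what later allows the chunk
 * to be linked onto gc_list (see stage_mem_gc()) and freed by
 * thmap_gc() without knowing its size.
 */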
    255  1.5.6.2    martin static uintptr_t
    256  1.5.6.2    martin gc_alloc(const thmap_t *thmap, size_t len)
    257  1.5.6.2    martin {
    258  1.5.6.2    martin 	const size_t alloclen = offsetof(struct thmap_gc, data[len]);
    259  1.5.6.2    martin 	const uintptr_t gcaddr = thmap->ops->alloc(alloclen);
    260  1.5.6.2    martin 
    261  1.5.6.2    martin 	if (!gcaddr)
    262  1.5.6.2    martin 		return 0;
    263  1.5.6.2    martin 
    264  1.5.6.2    martin 	thmap_gc_t *const gc = THMAP_GETPTR(thmap, gcaddr);
    265  1.5.6.2    martin 	gc->len = len;
    266  1.5.6.2    martin 	return THMAP_GETOFF(thmap, &gc->data[0]);
    267  1.5.6.2    martin }
    268  1.5.6.2    martin 
    269  1.5.6.2    martin static void
    270  1.5.6.2    martin gc_free(const thmap_t *thmap, uintptr_t addr, size_t len)
    271  1.5.6.2    martin {
    272  1.5.6.2    martin 	const size_t alloclen = offsetof(struct thmap_gc, data[len]);
    273  1.5.6.2    martin 	char *const ptr = THMAP_GETPTR(thmap, addr);
    274  1.5.6.2    martin 	thmap_gc_t *const gc = container_of(ptr, struct thmap_gc, data[0]);
    275  1.5.6.2    martin 	const uintptr_t gcaddr = THMAP_GETOFF(thmap, gc);
    276  1.5.6.2    martin 
    277  1.5.6.2    martin 	KASSERTMSG(gc->len == len, "thmap=%p ops=%p addr=%p len=%zu"
    278  1.5.6.2    martin 	    " gc=%p gc->len=%zu",
    279  1.5.6.2    martin 	    thmap, thmap->ops, (void *)addr, len, gc, gc->len);
    280  1.5.6.2    martin 	thmap->ops->free(gcaddr, alloclen);
    281  1.5.6.2    martin }
    282  1.5.6.2    martin 
    283      1.1     rmind /*
    284      1.1     rmind  * NODE LOCKING.
    285      1.1     rmind  */
    286      1.1     rmind 
    287      1.2  christos #ifdef DIAGNOSTIC
    288      1.1     rmind static inline bool
    289  1.5.6.1    martin node_locked_p(thmap_inode_t *node)
    290      1.1     rmind {
    291  1.5.6.1    martin 	return (atomic_load_relaxed(&node->state) & NODE_LOCKED) != 0;
    292      1.1     rmind }
    293      1.1     rmind #endif
    294      1.1     rmind 
    295      1.1     rmind static void
    296      1.1     rmind lock_node(thmap_inode_t *node)
    297      1.1     rmind {
    298      1.1     rmind 	unsigned bcount = SPINLOCK_BACKOFF_MIN;
    299      1.1     rmind 	uint32_t s;
    300      1.1     rmind again:
    301  1.5.6.1    martin 	s = atomic_load_relaxed(&node->state);
    302      1.1     rmind 	if (s & NODE_LOCKED) {
    303      1.1     rmind 		SPINLOCK_BACKOFF(bcount);
    304      1.1     rmind 		goto again;
    305      1.1     rmind 	}
     306  1.5.6.1    martin 	/* Acquire from prior release in unlock_node(). */
    307  1.5.6.1    martin 	if (!atomic_compare_exchange_weak_explicit_32(&node->state,
    308  1.5.6.1    martin 	    &s, s | NODE_LOCKED, memory_order_acquire, memory_order_relaxed)) {
    309      1.1     rmind 		bcount = SPINLOCK_BACKOFF_MIN;
    310      1.1     rmind 		goto again;
    311      1.1     rmind 	}
    312      1.1     rmind }
    313      1.1     rmind 
    314      1.1     rmind static void
    315      1.1     rmind unlock_node(thmap_inode_t *node)
    316      1.1     rmind {
    317  1.5.6.1    martin 	uint32_t s = atomic_load_relaxed(&node->state) & ~NODE_LOCKED;
    318      1.1     rmind 
    319      1.1     rmind 	ASSERT(node_locked_p(node));
    320  1.5.6.1    martin 	/* Release to subsequent acquire in lock_node(). */
    321  1.5.6.1    martin 	atomic_store_release(&node->state, s);
    322      1.1     rmind }
    323      1.1     rmind 
    324      1.1     rmind /*
    325      1.1     rmind  * HASH VALUE AND KEY OPERATIONS.
    326      1.1     rmind  */
    327      1.1     rmind 
    328      1.1     rmind static inline void
    329      1.1     rmind hashval_init(thmap_query_t *query, const void * restrict key, size_t len)
    330      1.1     rmind {
    331      1.1     rmind 	const uint32_t hashval = murmurhash3(key, len, 0);
    332      1.1     rmind 
    333      1.1     rmind 	query->rslot = ((hashval >> ROOT_MSBITS) ^ len) & ROOT_MASK;
    334      1.1     rmind 	query->level = 0;
    335      1.1     rmind 	query->hashval = hashval;
    336      1.1     rmind 	query->hashidx = 0;
    337      1.1     rmind }
    338      1.1     rmind 
    339      1.1     rmind /*
    340      1.1     rmind  * hashval_getslot: given the key, compute the hash (if not already cached)
    341      1.1     rmind  * and return the offset for the current level.
    342      1.1     rmind  */
    343      1.1     rmind static unsigned
    344      1.1     rmind hashval_getslot(thmap_query_t *query, const void * restrict key, size_t len)
    345      1.1     rmind {
    346      1.1     rmind 	const unsigned offset = query->level * LEVEL_BITS;
    347      1.1     rmind 	const unsigned shift = offset & HASHVAL_MOD;
    348      1.1     rmind 	const unsigned i = offset >> HASHVAL_SHIFT;
    349      1.1     rmind 
    350      1.1     rmind 	if (query->hashidx != i) {
    351      1.1     rmind 		/* Generate a hash value for a required range. */
    352      1.1     rmind 		query->hashval = murmurhash3(key, len, i);
    353      1.1     rmind 		query->hashidx = i;
    354      1.1     rmind 	}
    355      1.1     rmind 	return (query->hashval >> shift) & LEVEL_MASK;
    356      1.1     rmind }
    357      1.1     rmind 
    358      1.1     rmind static unsigned
    359      1.1     rmind hashval_getleafslot(const thmap_t *thmap,
    360      1.1     rmind     const thmap_leaf_t *leaf, unsigned level)
    361      1.1     rmind {
    362      1.1     rmind 	const void *key = THMAP_GETPTR(thmap, leaf->key);
    363      1.1     rmind 	const unsigned offset = level * LEVEL_BITS;
    364      1.1     rmind 	const unsigned shift = offset & HASHVAL_MOD;
    365      1.1     rmind 	const unsigned i = offset >> HASHVAL_SHIFT;
    366      1.1     rmind 
    367      1.1     rmind 	return (murmurhash3(key, leaf->len, i) >> shift) & LEVEL_MASK;
    368      1.1     rmind }
    369      1.1     rmind 
    370      1.1     rmind static inline unsigned
    371      1.1     rmind hashval_getl0slot(const thmap_t *thmap, const thmap_query_t *query,
    372      1.1     rmind     const thmap_leaf_t *leaf)
    373      1.1     rmind {
    374      1.1     rmind 	if (__predict_true(query->hashidx == 0)) {
    375      1.1     rmind 		return query->hashval & LEVEL_MASK;
    376      1.1     rmind 	}
    377      1.1     rmind 	return hashval_getleafslot(thmap, leaf, 0);
    378      1.1     rmind }
    379      1.1     rmind 
    380      1.1     rmind static bool
    381      1.1     rmind key_cmp_p(const thmap_t *thmap, const thmap_leaf_t *leaf,
    382      1.1     rmind     const void * restrict key, size_t len)
    383      1.1     rmind {
    384      1.1     rmind 	const void *leafkey = THMAP_GETPTR(thmap, leaf->key);
    385      1.1     rmind 	return len == leaf->len && memcmp(key, leafkey, len) == 0;
    386      1.1     rmind }
    387      1.1     rmind 
    388      1.1     rmind /*
    389      1.1     rmind  * INTER-NODE OPERATIONS.
    390      1.1     rmind  */
    391      1.1     rmind 
    392      1.1     rmind static thmap_inode_t *
    393      1.1     rmind node_create(thmap_t *thmap, thmap_inode_t *parent)
    394      1.1     rmind {
    395      1.1     rmind 	thmap_inode_t *node;
    396      1.1     rmind 	uintptr_t p;
    397      1.1     rmind 
    398  1.5.6.2    martin 	p = gc_alloc(thmap, THMAP_INODE_LEN);
    399      1.1     rmind 	if (!p) {
    400      1.1     rmind 		return NULL;
    401      1.1     rmind 	}
    402      1.1     rmind 	node = THMAP_GETPTR(thmap, p);
    403      1.1     rmind 	ASSERT(THMAP_ALIGNED_P(node));
    404      1.1     rmind 
    405      1.1     rmind 	memset(node, 0, THMAP_INODE_LEN);
    406      1.1     rmind 	if (parent) {
    407  1.5.6.1    martin 		/* Not yet published, no need for ordering. */
    408  1.5.6.1    martin 		atomic_store_relaxed(&node->state, NODE_LOCKED);
    409      1.1     rmind 		node->parent = THMAP_GETOFF(thmap, parent);
    410      1.1     rmind 	}
    411      1.1     rmind 	return node;
    412      1.1     rmind }
    413      1.1     rmind 
    414      1.1     rmind static void
    415      1.1     rmind node_insert(thmap_inode_t *node, unsigned slot, thmap_ptr_t child)
    416      1.1     rmind {
    417      1.1     rmind 	ASSERT(node_locked_p(node) || node->parent == THMAP_NULL);
    418  1.5.6.1    martin 	ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);
    419  1.5.6.1    martin 	ASSERT(atomic_load_relaxed(&node->slots[slot]) == THMAP_NULL);
    420      1.1     rmind 
    421  1.5.6.1    martin 	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) < LEVEL_SIZE);
    422      1.1     rmind 
    423  1.5.6.1    martin 	/*
     424  1.5.6.1    martin 	 * If the node is already public, the caller is responsible for
     425  1.5.6.1    martin 	 * issuing a release fence; if it is not yet public, no ordering
     426  1.5.6.1    martin 	 * is needed.  Hence relaxed ordering.
    427  1.5.6.1    martin 	 */
    428  1.5.6.1    martin 	atomic_store_relaxed(&node->slots[slot], child);
    429  1.5.6.1    martin 	atomic_store_relaxed(&node->state,
    430  1.5.6.1    martin 	    atomic_load_relaxed(&node->state) + 1);
    431      1.1     rmind }
    432      1.1     rmind 
    433      1.1     rmind static void
    434      1.1     rmind node_remove(thmap_inode_t *node, unsigned slot)
    435      1.1     rmind {
    436      1.1     rmind 	ASSERT(node_locked_p(node));
    437  1.5.6.1    martin 	ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);
    438  1.5.6.1    martin 	ASSERT(atomic_load_relaxed(&node->slots[slot]) != THMAP_NULL);
    439      1.1     rmind 
    440  1.5.6.1    martin 	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) > 0);
    441  1.5.6.1    martin 	ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) <= LEVEL_SIZE);
    442      1.1     rmind 
    443  1.5.6.1    martin 	/* Element will be GC-ed later; no need for ordering here. */
    444  1.5.6.1    martin 	atomic_store_relaxed(&node->slots[slot], THMAP_NULL);
    445  1.5.6.1    martin 	atomic_store_relaxed(&node->state,
    446  1.5.6.1    martin 	    atomic_load_relaxed(&node->state) - 1);
    447      1.1     rmind }
    448      1.1     rmind 
    449      1.1     rmind /*
    450      1.1     rmind  * LEAF OPERATIONS.
    451      1.1     rmind  */
    452      1.1     rmind 
    453      1.1     rmind static thmap_leaf_t *
    454      1.1     rmind leaf_create(const thmap_t *thmap, const void *key, size_t len, void *val)
    455      1.1     rmind {
    456      1.1     rmind 	thmap_leaf_t *leaf;
    457      1.1     rmind 	uintptr_t leaf_off, key_off;
    458      1.1     rmind 
    459  1.5.6.2    martin 	leaf_off = gc_alloc(thmap, sizeof(thmap_leaf_t));
    460      1.1     rmind 	if (!leaf_off) {
    461      1.1     rmind 		return NULL;
    462      1.1     rmind 	}
    463      1.1     rmind 	leaf = THMAP_GETPTR(thmap, leaf_off);
    464      1.1     rmind 	ASSERT(THMAP_ALIGNED_P(leaf));
    465      1.1     rmind 
    466      1.1     rmind 	if ((thmap->flags & THMAP_NOCOPY) == 0) {
    467      1.1     rmind 		/*
    468      1.1     rmind 		 * Copy the key.
    469      1.1     rmind 		 */
    470  1.5.6.2    martin 		key_off = gc_alloc(thmap, len);
    471      1.1     rmind 		if (!key_off) {
    472  1.5.6.2    martin 			gc_free(thmap, leaf_off, sizeof(thmap_leaf_t));
    473      1.1     rmind 			return NULL;
    474      1.1     rmind 		}
    475      1.1     rmind 		memcpy(THMAP_GETPTR(thmap, key_off), key, len);
    476      1.1     rmind 		leaf->key = key_off;
    477      1.1     rmind 	} else {
    478      1.1     rmind 		/* Otherwise, we use a reference. */
    479      1.1     rmind 		leaf->key = (uintptr_t)key;
    480      1.1     rmind 	}
    481      1.1     rmind 	leaf->len = len;
    482      1.1     rmind 	leaf->val = val;
    483      1.1     rmind 	return leaf;
    484      1.1     rmind }
    485      1.1     rmind 
    486      1.1     rmind static void
    487      1.1     rmind leaf_free(const thmap_t *thmap, thmap_leaf_t *leaf)
    488      1.1     rmind {
    489      1.1     rmind 	if ((thmap->flags & THMAP_NOCOPY) == 0) {
    490  1.5.6.2    martin 		gc_free(thmap, leaf->key, leaf->len);
    491      1.1     rmind 	}
    492  1.5.6.2    martin 	gc_free(thmap, THMAP_GETOFF(thmap, leaf), sizeof(thmap_leaf_t));
    493      1.1     rmind }
    494      1.1     rmind 
    495      1.1     rmind static thmap_leaf_t *
    496      1.1     rmind get_leaf(const thmap_t *thmap, thmap_inode_t *parent, unsigned slot)
    497      1.1     rmind {
    498      1.1     rmind 	thmap_ptr_t node;
    499      1.1     rmind 
    500  1.5.6.1    martin 	/* Consume from prior release in thmap_put(). */
    501  1.5.6.1    martin 	node = atomic_load_consume(&parent->slots[slot]);
    502      1.1     rmind 	if (THMAP_INODE_P(node)) {
    503      1.1     rmind 		return NULL;
    504      1.1     rmind 	}
    505      1.1     rmind 	return THMAP_NODE(thmap, node);
    506      1.1     rmind }
    507      1.1     rmind 
    508      1.1     rmind /*
    509      1.1     rmind  * ROOT OPERATIONS.
    510      1.1     rmind  */
    511      1.1     rmind 
    512  1.5.6.1    martin /*
    513  1.5.6.1    martin  * root_try_put: Try to set a root pointer at query->rslot.
    514  1.5.6.1    martin  *
    515  1.5.6.1    martin  * => Implies release operation on success.
    516  1.5.6.1    martin  * => Implies no ordering on failure.
    517  1.5.6.1    martin  */
    518      1.1     rmind static inline bool
    519      1.1     rmind root_try_put(thmap_t *thmap, const thmap_query_t *query, thmap_leaf_t *leaf)
    520      1.1     rmind {
    521  1.5.6.1    martin 	thmap_ptr_t expected;
    522      1.1     rmind 	const unsigned i = query->rslot;
    523      1.1     rmind 	thmap_inode_t *node;
    524      1.1     rmind 	thmap_ptr_t nptr;
    525      1.1     rmind 	unsigned slot;
    526      1.1     rmind 
    527      1.1     rmind 	/*
    528  1.5.6.1    martin 	 * Must pre-check first.  No ordering required because we will
    529  1.5.6.1    martin 	 * check again before taking any actions, and start over if
    530  1.5.6.1    martin 	 * this changes from null.
    531      1.1     rmind 	 */
    532  1.5.6.1    martin 	if (atomic_load_relaxed(&thmap->root[i])) {
    533      1.1     rmind 		return false;
    534      1.1     rmind 	}
    535      1.1     rmind 
    536      1.1     rmind 	/*
    537      1.1     rmind 	 * Create an intermediate node.  Since there is no parent set,
    538  1.5.6.1    martin 	 * it will be created unlocked and the CAS operation will
    539  1.5.6.1    martin 	 * release it to readers.
    540      1.1     rmind 	 */
    541      1.1     rmind 	node = node_create(thmap, NULL);
    542      1.1     rmind 	slot = hashval_getl0slot(thmap, query, leaf);
    543      1.1     rmind 	node_insert(node, slot, THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT);
    544      1.1     rmind 	nptr = THMAP_GETOFF(thmap, node);
    545      1.1     rmind again:
    546  1.5.6.1    martin 	if (atomic_load_relaxed(&thmap->root[i])) {
    547  1.5.6.2    martin 		gc_free(thmap, nptr, THMAP_INODE_LEN);
    548      1.1     rmind 		return false;
    549      1.1     rmind 	}
    550  1.5.6.1    martin 	/* Release to subsequent consume in find_edge_node(). */
    551  1.5.6.1    martin 	expected = THMAP_NULL;
    552  1.5.6.1    martin 	if (!atomic_compare_exchange_weak_explicit_ptr(&thmap->root[i], &expected,
    553  1.5.6.1    martin 	    nptr, memory_order_release, memory_order_relaxed)) {
    554      1.1     rmind 		goto again;
    555      1.1     rmind 	}
    556      1.1     rmind 	return true;
    557      1.1     rmind }
    558      1.1     rmind 
    559      1.1     rmind /*
    560      1.1     rmind  * find_edge_node: given the hash, traverse the tree to find the edge node.
    561      1.1     rmind  *
    562      1.1     rmind  * => Returns an aligned (clean) pointer to the parent node.
    563      1.1     rmind  * => Returns the slot number and sets current level.
    564      1.1     rmind  */
    565      1.1     rmind static thmap_inode_t *
    566      1.1     rmind find_edge_node(const thmap_t *thmap, thmap_query_t *query,
    567      1.1     rmind     const void * restrict key, size_t len, unsigned *slot)
    568      1.1     rmind {
    569  1.5.6.1    martin 	thmap_ptr_t root_slot;
    570      1.1     rmind 	thmap_inode_t *parent;
    571      1.1     rmind 	thmap_ptr_t node;
    572      1.1     rmind 	unsigned off;
    573      1.1     rmind 
    574      1.1     rmind 	ASSERT(query->level == 0);
    575      1.1     rmind 
    576  1.5.6.1    martin 	/* Consume from prior release in root_try_put(). */
    577  1.5.6.1    martin 	root_slot = atomic_load_consume(&thmap->root[query->rslot]);
    578      1.1     rmind 	parent = THMAP_NODE(thmap, root_slot);
    579      1.1     rmind 	if (!parent) {
    580      1.1     rmind 		return NULL;
    581      1.1     rmind 	}
    582      1.1     rmind descend:
    583      1.1     rmind 	off = hashval_getslot(query, key, len);
    584  1.5.6.1    martin 	/* Consume from prior release in thmap_put(). */
    585  1.5.6.1    martin 	node = atomic_load_consume(&parent->slots[off]);
    586      1.1     rmind 
    587      1.1     rmind 	/* Descend the tree until we find a leaf or empty slot. */
    588      1.1     rmind 	if (node && THMAP_INODE_P(node)) {
    589      1.1     rmind 		parent = THMAP_NODE(thmap, node);
    590      1.1     rmind 		query->level++;
    591      1.1     rmind 		goto descend;
    592      1.1     rmind 	}
    593  1.5.6.1    martin 	/*
    594  1.5.6.1    martin 	 * NODE_DELETED does not become stale until GC runs, which
    595  1.5.6.1    martin 	 * cannot happen while we are in the middle of an operation,
    596  1.5.6.1    martin 	 * hence relaxed ordering.
    597  1.5.6.1    martin 	 */
    598  1.5.6.1    martin 	if (atomic_load_relaxed(&parent->state) & NODE_DELETED) {
    599      1.1     rmind 		return NULL;
    600      1.1     rmind 	}
    601      1.1     rmind 	*slot = off;
    602      1.1     rmind 	return parent;
    603      1.1     rmind }
    604      1.1     rmind 
    605      1.1     rmind /*
    606      1.1     rmind  * find_edge_node_locked: traverse the tree, like find_edge_node(),
    607      1.1     rmind  * but attempt to lock the edge node.
    608      1.1     rmind  *
    609      1.1     rmind  * => Returns NULL if the deleted node is found.  This indicates that
    610      1.1     rmind  *    the caller must re-try from the root, as the root slot might have
    611      1.1     rmind  *    changed too.
    612      1.1     rmind  */
    613      1.1     rmind static thmap_inode_t *
    614      1.1     rmind find_edge_node_locked(const thmap_t *thmap, thmap_query_t *query,
    615      1.1     rmind     const void * restrict key, size_t len, unsigned *slot)
    616      1.1     rmind {
    617      1.1     rmind 	thmap_inode_t *node;
    618      1.1     rmind 	thmap_ptr_t target;
    619      1.1     rmind retry:
    620      1.1     rmind 	/*
    621      1.1     rmind 	 * Find the edge node and lock it!  Re-check the state since
    622      1.1     rmind 	 * the tree might change by the time we acquire the lock.
    623      1.1     rmind 	 */
    624      1.1     rmind 	node = find_edge_node(thmap, query, key, len, slot);
    625      1.1     rmind 	if (!node) {
    626      1.1     rmind 		/* The root slot is empty -- let the caller decide. */
    627      1.1     rmind 		query->level = 0;
    628      1.1     rmind 		return NULL;
    629      1.1     rmind 	}
    630      1.1     rmind 	lock_node(node);
    631  1.5.6.1    martin 	if (__predict_false(atomic_load_relaxed(&node->state) & NODE_DELETED)) {
    632      1.1     rmind 		/*
    633      1.1     rmind 		 * The node has been deleted.  The tree might have a new
    634      1.1     rmind 		 * shape now, therefore we must re-start from the root.
    635      1.1     rmind 		 */
    636      1.1     rmind 		unlock_node(node);
    637      1.1     rmind 		query->level = 0;
    638      1.1     rmind 		return NULL;
    639      1.1     rmind 	}
    640  1.5.6.1    martin 	target = atomic_load_relaxed(&node->slots[*slot]);
    641      1.1     rmind 	if (__predict_false(target && THMAP_INODE_P(target))) {
    642      1.1     rmind 		/*
    643      1.1     rmind 		 * The target slot has been changed and it is now an
    644      1.1     rmind 		 * intermediate node.  Re-start from the top internode.
    645      1.1     rmind 		 */
    646      1.1     rmind 		unlock_node(node);
    647      1.1     rmind 		query->level = 0;
    648      1.1     rmind 		goto retry;
    649      1.1     rmind 	}
    650      1.1     rmind 	return node;
    651      1.1     rmind }
    652      1.1     rmind 
    653      1.1     rmind /*
    654      1.1     rmind  * thmap_get: lookup a value given the key.
    655      1.1     rmind  */
    656      1.1     rmind void *
    657      1.1     rmind thmap_get(thmap_t *thmap, const void *key, size_t len)
    658      1.1     rmind {
    659      1.1     rmind 	thmap_query_t query;
    660      1.1     rmind 	thmap_inode_t *parent;
    661      1.1     rmind 	thmap_leaf_t *leaf;
    662      1.1     rmind 	unsigned slot;
    663      1.1     rmind 
    664      1.1     rmind 	hashval_init(&query, key, len);
    665      1.1     rmind 	parent = find_edge_node(thmap, &query, key, len, &slot);
    666      1.1     rmind 	if (!parent) {
    667      1.1     rmind 		return NULL;
    668      1.1     rmind 	}
    669      1.1     rmind 	leaf = get_leaf(thmap, parent, slot);
    670      1.1     rmind 	if (!leaf) {
    671      1.1     rmind 		return NULL;
    672      1.1     rmind 	}
    673      1.1     rmind 	if (!key_cmp_p(thmap, leaf, key, len)) {
    674      1.1     rmind 		return NULL;
    675      1.1     rmind 	}
    676      1.1     rmind 	return leaf->val;
    677      1.1     rmind }
    678      1.1     rmind 
    679      1.1     rmind /*
    680      1.1     rmind  * thmap_put: insert a value given the key.
    681      1.1     rmind  *
    682      1.1     rmind  * => If the key is already present, return the associated value.
    683      1.1     rmind  * => Otherwise, on successful insert, return the given value.
    684      1.1     rmind  */
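/*
 * Illustrative call pattern (a sketch, not part of the implementation):
 *
 *	void *ret = thmap_put(map, key, len, val);
 *	if (ret == NULL) {
 *		// out of memory; nothing was inserted
 *	} else if (ret != val) {
 *		// the key was already present; ret is the current value
 *	}
 */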
    685      1.1     rmind void *
    686      1.1     rmind thmap_put(thmap_t *thmap, const void *key, size_t len, void *val)
    687      1.1     rmind {
    688      1.1     rmind 	thmap_query_t query;
    689      1.1     rmind 	thmap_leaf_t *leaf, *other;
    690      1.1     rmind 	thmap_inode_t *parent, *child;
    691      1.1     rmind 	unsigned slot, other_slot;
    692      1.1     rmind 	thmap_ptr_t target;
    693      1.1     rmind 
    694      1.1     rmind 	/*
    695  1.5.6.1    martin 	 * First, pre-allocate and initialize the leaf node.
    696      1.1     rmind 	 */
    697      1.1     rmind 	leaf = leaf_create(thmap, key, len, val);
    698      1.1     rmind 	if (__predict_false(!leaf)) {
    699      1.1     rmind 		return NULL;
    700      1.1     rmind 	}
    701      1.1     rmind 	hashval_init(&query, key, len);
    702      1.1     rmind retry:
    703      1.1     rmind 	/*
    704      1.1     rmind 	 * Try to insert into the root first, if its slot is empty.
    705      1.1     rmind 	 */
    706      1.1     rmind 	if (root_try_put(thmap, &query, leaf)) {
    707      1.1     rmind 		/* Success: the leaf was inserted; no locking involved. */
    708      1.1     rmind 		return val;
    709      1.1     rmind 	}
    710      1.1     rmind 
    711      1.1     rmind 	/*
    712  1.5.6.1    martin 	 * Release node via store in node_insert (*) to subsequent
    713  1.5.6.1    martin 	 * consume in get_leaf() or find_edge_node().
    714  1.5.6.1    martin 	 */
    715  1.5.6.1    martin 	atomic_thread_fence(memory_order_release);
    716  1.5.6.1    martin 
    717  1.5.6.1    martin 	/*
    718      1.1     rmind 	 * Find the edge node and the target slot.
    719      1.1     rmind 	 */
    720      1.1     rmind 	parent = find_edge_node_locked(thmap, &query, key, len, &slot);
    721      1.1     rmind 	if (!parent) {
    722      1.1     rmind 		goto retry;
    723      1.1     rmind 	}
    724  1.5.6.1    martin 	target = atomic_load_relaxed(&parent->slots[slot]); // tagged offset
    725      1.1     rmind 	if (THMAP_INODE_P(target)) {
    726      1.1     rmind 		/*
    727  1.5.6.1    martin 		 * Empty slot: simply insert the new leaf.  The release
    728      1.1     rmind 		 * fence is already issued for us.
    729      1.1     rmind 		 */
    730      1.1     rmind 		target = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
    731  1.5.6.1    martin 		node_insert(parent, slot, target); /* (*) */
    732      1.1     rmind 		goto out;
    733      1.1     rmind 	}
    734      1.1     rmind 
    735      1.1     rmind 	/*
    736      1.1     rmind 	 * Collision or duplicate.
    737      1.1     rmind 	 */
    738      1.1     rmind 	other = THMAP_NODE(thmap, target);
    739      1.1     rmind 	if (key_cmp_p(thmap, other, key, len)) {
    740      1.1     rmind 		/*
    741      1.1     rmind 		 * Duplicate.  Free the pre-allocated leaf and
    742      1.1     rmind 		 * return the present value.
    743      1.1     rmind 		 */
    744      1.1     rmind 		leaf_free(thmap, leaf);
    745      1.1     rmind 		val = other->val;
    746      1.1     rmind 		goto out;
    747      1.1     rmind 	}
    748      1.1     rmind descend:
    749      1.1     rmind 	/*
    750      1.1     rmind 	 * Collision -- expand the tree.  Create an intermediate node
    751      1.1     rmind 	 * which will be locked (NODE_LOCKED) for us.  At this point,
    752      1.1     rmind 	 * we advance to the next level.
    753      1.1     rmind 	 */
    754      1.1     rmind 	child = node_create(thmap, parent);
    755      1.1     rmind 	if (__predict_false(!child)) {
    756      1.1     rmind 		leaf_free(thmap, leaf);
    757      1.1     rmind 		val = NULL;
    758      1.1     rmind 		goto out;
    759      1.1     rmind 	}
    760      1.1     rmind 	query.level++;
    761      1.1     rmind 
    762      1.1     rmind 	/*
    763  1.5.6.1    martin 	 * Insert the other (colliding) leaf first.  The new child is
    764  1.5.6.1    martin 	 * not yet published, so memory order is relaxed.
    765      1.1     rmind 	 */
    766      1.1     rmind 	other_slot = hashval_getleafslot(thmap, other, query.level);
    767      1.1     rmind 	target = THMAP_GETOFF(thmap, other) | THMAP_LEAF_BIT;
    768      1.1     rmind 	node_insert(child, other_slot, target);
    769      1.1     rmind 
    770      1.1     rmind 	/*
    771      1.1     rmind 	 * Insert the intermediate node into the parent node.
     772      1.1     rmind 	 * It becomes the new parent for our new leaf.
    773      1.1     rmind 	 *
    774  1.5.6.1    martin 	 * Ensure that stores to the child (and leaf) reach global
    775  1.5.6.1    martin 	 * visibility before it gets inserted to the parent, as
    776  1.5.6.1    martin 	 * consumed by get_leaf() or find_edge_node().
    777      1.1     rmind 	 */
    778  1.5.6.1    martin 	atomic_store_release(&parent->slots[slot], THMAP_GETOFF(thmap, child));
    779      1.1     rmind 
    780      1.1     rmind 	unlock_node(parent);
    781      1.1     rmind 	ASSERT(node_locked_p(child));
    782      1.1     rmind 	parent = child;
    783      1.1     rmind 
    784      1.1     rmind 	/*
    785      1.1     rmind 	 * Get the new slot and check for another collision
    786      1.1     rmind 	 * at the next level.
    787      1.1     rmind 	 */
    788      1.1     rmind 	slot = hashval_getslot(&query, key, len);
    789      1.1     rmind 	if (slot == other_slot) {
    790      1.1     rmind 		/* Another collision -- descend and expand again. */
    791      1.1     rmind 		goto descend;
    792      1.1     rmind 	}
    793      1.1     rmind 
    794  1.5.6.1    martin 	/*
     795  1.5.6.1    martin 	 * Insert our new leaf once we have expanded far enough.  The release
    796  1.5.6.1    martin 	 * fence is already issued for us.
    797  1.5.6.1    martin 	 */
    798      1.1     rmind 	target = THMAP_GETOFF(thmap, leaf) | THMAP_LEAF_BIT;
    799  1.5.6.1    martin 	node_insert(parent, slot, target); /* (*) */
    800      1.1     rmind out:
    801      1.1     rmind 	unlock_node(parent);
    802      1.1     rmind 	return val;
    803      1.1     rmind }
    804      1.1     rmind 
    805      1.1     rmind /*
    806      1.1     rmind  * thmap_del: remove the entry given the key.
    807      1.1     rmind  */
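/*
 * Note that deleted entries are not freed immediately: their memory is
 * staged on an internal list and must be reclaimed by the caller once
 * no readers can still hold references.  A sketch of the sequence
 * (the grace-period step is whatever mechanism the caller uses):
 *
 *	void *val = thmap_del(map, key, len);
 *	void *ref = thmap_stage_gc(map);
 *	// wait for all pre-existing readers to drain (grace period)
 *	thmap_gc(map, ref);
 */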
    808      1.1     rmind void *
    809      1.1     rmind thmap_del(thmap_t *thmap, const void *key, size_t len)
    810      1.1     rmind {
    811      1.1     rmind 	thmap_query_t query;
    812      1.1     rmind 	thmap_leaf_t *leaf;
    813      1.1     rmind 	thmap_inode_t *parent;
    814      1.1     rmind 	unsigned slot;
    815      1.1     rmind 	void *val;
    816      1.1     rmind 
    817      1.1     rmind 	hashval_init(&query, key, len);
    818      1.1     rmind 	parent = find_edge_node_locked(thmap, &query, key, len, &slot);
    819      1.1     rmind 	if (!parent) {
    820      1.1     rmind 		/* Root slot empty: not found. */
    821      1.1     rmind 		return NULL;
    822      1.1     rmind 	}
    823      1.1     rmind 	leaf = get_leaf(thmap, parent, slot);
    824      1.1     rmind 	if (!leaf || !key_cmp_p(thmap, leaf, key, len)) {
    825      1.1     rmind 		/* Not found. */
    826      1.1     rmind 		unlock_node(parent);
    827      1.1     rmind 		return NULL;
    828      1.1     rmind 	}
    829      1.1     rmind 
    830      1.1     rmind 	/* Remove the leaf. */
    831  1.5.6.1    martin 	ASSERT(THMAP_NODE(thmap, atomic_load_relaxed(&parent->slots[slot]))
    832  1.5.6.1    martin 	    == leaf);
    833      1.1     rmind 	node_remove(parent, slot);
    834      1.1     rmind 
    835      1.1     rmind 	/*
    836      1.1     rmind 	 * Collapse the levels if removing the last item.
    837      1.1     rmind 	 */
    838  1.5.6.1    martin 	while (query.level &&
    839  1.5.6.1    martin 	    NODE_COUNT(atomic_load_relaxed(&parent->state)) == 0) {
    840      1.1     rmind 		thmap_inode_t *node = parent;
    841      1.1     rmind 
    842  1.5.6.1    martin 		ASSERT(atomic_load_relaxed(&node->state) == NODE_LOCKED);
    843      1.1     rmind 
    844      1.1     rmind 		/*
    845      1.1     rmind 		 * Ascend one level up.
    846      1.1     rmind 		 * => Mark our current parent as deleted.
    847      1.1     rmind 		 * => Lock the parent one level up.
    848      1.1     rmind 		 */
    849      1.1     rmind 		query.level--;
    850      1.1     rmind 		slot = hashval_getslot(&query, key, len);
    851      1.1     rmind 		parent = THMAP_NODE(thmap, node->parent);
    852      1.1     rmind 		ASSERT(parent != NULL);
    853      1.1     rmind 
    854      1.1     rmind 		lock_node(parent);
    855  1.5.6.1    martin 		ASSERT((atomic_load_relaxed(&parent->state) & NODE_DELETED)
    856  1.5.6.1    martin 		    == 0);
    857      1.1     rmind 
    858  1.5.6.1    martin 		/*
    859  1.5.6.1    martin 		 * Lock is exclusive, so nobody else can be writing at
    860  1.5.6.1    martin 		 * the same time, and no need for atomic R/M/W, but
    861  1.5.6.1    martin 		 * readers may read without the lock and so need atomic
    862  1.5.6.1    martin 		 * load/store.  No ordering here needed because the
    863  1.5.6.1    martin 		 * entry itself stays valid until GC.
    864  1.5.6.1    martin 		 */
    865  1.5.6.1    martin 		atomic_store_relaxed(&node->state,
    866  1.5.6.1    martin 		    atomic_load_relaxed(&node->state) | NODE_DELETED);
    867  1.5.6.1    martin 		unlock_node(node); // memory_order_release
    868      1.1     rmind 
    869  1.5.6.1    martin 		ASSERT(THMAP_NODE(thmap,
    870  1.5.6.1    martin 		    atomic_load_relaxed(&parent->slots[slot])) == node);
    871      1.1     rmind 		node_remove(parent, slot);
    872      1.1     rmind 
    873      1.1     rmind 		/* Stage the removed node for G/C. */
    874      1.1     rmind 		stage_mem_gc(thmap, THMAP_GETOFF(thmap, node), THMAP_INODE_LEN);
    875      1.1     rmind 	}
    876      1.1     rmind 
    877      1.1     rmind 	/*
    878      1.1     rmind 	 * If the top node is empty, then we need to remove it from the
    879      1.1     rmind 	 * root level.  Mark the node as deleted and clear the slot.
    880      1.1     rmind 	 *
    881      1.1     rmind 	 * Note: acquiring the lock on the top node effectively prevents
    882      1.1     rmind 	 * the root slot from changing.
    883      1.1     rmind 	 */
    884  1.5.6.1    martin 	if (NODE_COUNT(atomic_load_relaxed(&parent->state)) == 0) {
    885      1.1     rmind 		const unsigned rslot = query.rslot;
    886  1.5.6.1    martin 		const thmap_ptr_t nptr =
    887  1.5.6.1    martin 		    atomic_load_relaxed(&thmap->root[rslot]);
    888      1.1     rmind 
    889      1.1     rmind 		ASSERT(query.level == 0);
    890      1.1     rmind 		ASSERT(parent->parent == THMAP_NULL);
    891      1.1     rmind 		ASSERT(THMAP_GETOFF(thmap, parent) == nptr);
    892      1.1     rmind 
    893      1.1     rmind 		/* Mark as deleted and remove from the root-level slot. */
    894  1.5.6.1    martin 		atomic_store_relaxed(&parent->state,
    895  1.5.6.1    martin 		    atomic_load_relaxed(&parent->state) | NODE_DELETED);
    896  1.5.6.1    martin 		atomic_store_relaxed(&thmap->root[rslot], THMAP_NULL);
    897      1.1     rmind 
    898      1.1     rmind 		stage_mem_gc(thmap, nptr, THMAP_INODE_LEN);
    899      1.1     rmind 	}
    900      1.1     rmind 	unlock_node(parent);
    901      1.1     rmind 
    902      1.1     rmind 	/*
    903      1.1     rmind 	 * Save the value and stage the leaf for G/C.
    904      1.1     rmind 	 */
    905      1.1     rmind 	val = leaf->val;
    906      1.1     rmind 	if ((thmap->flags & THMAP_NOCOPY) == 0) {
    907      1.1     rmind 		stage_mem_gc(thmap, leaf->key, leaf->len);
    908      1.1     rmind 	}
    909      1.1     rmind 	stage_mem_gc(thmap, THMAP_GETOFF(thmap, leaf), sizeof(thmap_leaf_t));
    910      1.1     rmind 	return val;
    911      1.1     rmind }
    912      1.1     rmind 
    913      1.1     rmind /*
    914      1.1     rmind  * G/C routines.
    915      1.1     rmind  */
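/*
 * stage_mem_gc() links a removed chunk onto thmap->gc_list with a
 * lock-free CAS; thmap_stage_gc() atomically detaches the whole list;
 * thmap_gc() then walks the detached list and releases each chunk via
 * gc_free().  The caller must ensure no readers can still reference
 * the staged memory before calling thmap_gc().
 */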
    916      1.1     rmind 
    917      1.1     rmind static void
    918      1.1     rmind stage_mem_gc(thmap_t *thmap, uintptr_t addr, size_t len)
    919      1.1     rmind {
    920  1.5.6.2    martin 	char *const ptr = THMAP_GETPTR(thmap, addr);
    921      1.1     rmind 	thmap_gc_t *head, *gc;
    922      1.1     rmind 
    923  1.5.6.2    martin 	gc = container_of(ptr, struct thmap_gc, data[0]);
    924  1.5.6.2    martin 	KASSERTMSG(gc->len == len,
    925  1.5.6.2    martin 	    "thmap=%p ops=%p ptr=%p len=%zu gc=%p gc->len=%zu",
    926  1.5.6.2    martin 	    thmap, thmap->ops, (char *)addr, len, gc, gc->len);
    927      1.1     rmind retry:
    928  1.5.6.1    martin 	head = atomic_load_relaxed(&thmap->gc_list);
    929  1.5.6.1    martin 	gc->next = head; // not yet published
    930  1.5.6.1    martin 
    931  1.5.6.1    martin 	/* Release to subsequent acquire in thmap_stage_gc(). */
    932  1.5.6.1    martin 	if (!atomic_compare_exchange_weak_explicit_ptr(&thmap->gc_list, &head, gc,
    933  1.5.6.1    martin 	    memory_order_release, memory_order_relaxed)) {
    934      1.1     rmind 		goto retry;
    935      1.1     rmind 	}
    936      1.1     rmind }
    937      1.1     rmind 
    938      1.1     rmind void *
    939      1.1     rmind thmap_stage_gc(thmap_t *thmap)
    940      1.1     rmind {
    941  1.5.6.1    martin 	/* Acquire from prior release in stage_mem_gc(). */
    942  1.5.6.1    martin 	return atomic_exchange_explicit(&thmap->gc_list, NULL,
    943  1.5.6.1    martin 	    memory_order_acquire);
    944      1.1     rmind }
    945      1.1     rmind 
    946      1.1     rmind void
    947      1.1     rmind thmap_gc(thmap_t *thmap, void *ref)
    948      1.1     rmind {
    949      1.1     rmind 	thmap_gc_t *gc = ref;
    950      1.1     rmind 
    951      1.1     rmind 	while (gc) {
    952      1.1     rmind 		thmap_gc_t *next = gc->next;
    953  1.5.6.2    martin 		gc_free(thmap, THMAP_GETOFF(thmap, &gc->data[0]), gc->len);
    954      1.1     rmind 		gc = next;
    955      1.1     rmind 	}
    956      1.1     rmind }
    957      1.1     rmind 
    958      1.1     rmind /*
    959      1.1     rmind  * thmap_create: construct a new trie-hash map object.
    960      1.1     rmind  */
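/*
 * A sketch of creating a map backed by a caller-provided allocator,
 * e.g. a shared-memory region.  shm_base, shm_alloc and shm_free are
 * hypothetical; the callbacks return/take offsets relative to baseptr:
 *
 *	static const thmap_ops_t shm_ops = {
 *		.alloc = shm_alloc,
 *		.free = shm_free,
 *	};
 *	thmap_t *map = thmap_create(shm_base, &shm_ops, THMAP_NOCOPY);
 */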
    961      1.1     rmind thmap_t *
    962      1.1     rmind thmap_create(uintptr_t baseptr, const thmap_ops_t *ops, unsigned flags)
    963      1.1     rmind {
    964      1.1     rmind 	thmap_t *thmap;
    965      1.1     rmind 	uintptr_t root;
    966      1.1     rmind 
    967      1.1     rmind 	/*
     968      1.1     rmind 	 * Set up the map object.
    969      1.1     rmind 	 */
    970      1.1     rmind 	if (!THMAP_ALIGNED_P(baseptr)) {
    971      1.1     rmind 		return NULL;
    972      1.1     rmind 	}
    973      1.1     rmind 	thmap = kmem_zalloc(sizeof(thmap_t), KM_SLEEP);
    974      1.1     rmind 	if (!thmap) {
    975      1.1     rmind 		return NULL;
    976      1.1     rmind 	}
    977      1.1     rmind 	thmap->baseptr = baseptr;
    978      1.1     rmind 	thmap->ops = ops ? ops : &thmap_default_ops;
    979      1.1     rmind 	thmap->flags = flags;
    980      1.1     rmind 
    981      1.1     rmind 	if ((thmap->flags & THMAP_SETROOT) == 0) {
    982      1.1     rmind 		/* Allocate the root level. */
    983  1.5.6.2    martin 		root = gc_alloc(thmap, THMAP_ROOT_LEN);
    984  1.5.6.2    martin 		if (!root) {
    985      1.1     rmind 			kmem_free(thmap, sizeof(thmap_t));
    986      1.1     rmind 			return NULL;
    987      1.1     rmind 		}
    988  1.5.6.2    martin 		thmap->root = THMAP_GETPTR(thmap, root);
    989      1.1     rmind 		memset(thmap->root, 0, THMAP_ROOT_LEN);
    990  1.5.6.1    martin 		atomic_thread_fence(memory_order_release); /* XXX */
    991      1.1     rmind 	}
    992      1.1     rmind 	return thmap;
    993      1.1     rmind }
    994      1.1     rmind 
    995      1.1     rmind int
    996      1.1     rmind thmap_setroot(thmap_t *thmap, uintptr_t root_off)
    997      1.1     rmind {
    998      1.1     rmind 	if (thmap->root) {
    999      1.1     rmind 		return -1;
   1000      1.1     rmind 	}
   1001      1.1     rmind 	thmap->root = THMAP_GETPTR(thmap, root_off);
   1002  1.5.6.1    martin 	atomic_thread_fence(memory_order_release); /* XXX */
   1003      1.1     rmind 	return 0;
   1004      1.1     rmind }
   1005      1.1     rmind 
   1006      1.1     rmind uintptr_t
   1007      1.1     rmind thmap_getroot(const thmap_t *thmap)
   1008      1.1     rmind {
   1009      1.1     rmind 	return THMAP_GETOFF(thmap, thmap->root);
   1010      1.1     rmind }
   1011      1.1     rmind 
   1012      1.1     rmind void
   1013      1.1     rmind thmap_destroy(thmap_t *thmap)
   1014      1.1     rmind {
   1015      1.1     rmind 	uintptr_t root = THMAP_GETOFF(thmap, thmap->root);
   1016      1.1     rmind 	void *ref;
   1017      1.1     rmind 
   1018      1.1     rmind 	ref = thmap_stage_gc(thmap);
   1019      1.1     rmind 	thmap_gc(thmap, ref);
   1020      1.1     rmind 
   1021      1.1     rmind 	if ((thmap->flags & THMAP_SETROOT) == 0) {
   1022  1.5.6.2    martin 		gc_free(thmap, root, THMAP_ROOT_LEN);
   1023      1.1     rmind 	}
   1024      1.1     rmind 	kmem_free(thmap, sizeof(thmap_t));
   1025      1.1     rmind }