/*	$NetBSD: linux_idr.c,v 1.8 2018/08/27 14:14:42 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.8 2018/08/27 14:14:42 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/rbtree.h>
#include <sys/specificdata.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Each idr entry is an idr_node, kept in a red-black tree sorted by
 * id (in_index).
 */
struct idr_node {
	rb_node_t		in_rb_node;
	int			in_index;
	void			*in_data;
};

/*
 * Per-lwp cache holding at most one node preallocated by idr_preload
 * for the next idr_alloc.  ic_where records idr_preload's caller so
 * that leaks (missing idr_preload_end) can be attributed.
 */
struct idr_cache {
	struct idr_node		*ic_node;
	void			*ic_where;
};

static specificdata_key_t idr_cache_key __read_mostly;

/*
 * idr_cache_warning(cache)
 *
 *	Warn that the idr_preload recorded in cache was never matched
 *	by an idr_preload_end, naming the caller if DDB symbols are
 *	available.
 */
static void
idr_cache_warning(struct idr_cache *cache)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;
#endif

	KASSERT(cache->ic_node != NULL);

#ifdef DDB
	db_find_sym_and_offset((db_addr_t)(uintptr_t)cache->ic_where,
	    &name, &offset);
	if (name) {
		printf("WARNING: idr preload at %s+%#"DDB_EXPR_FMT"x"
		    " leaked in lwp %s @ %p\n",
		    name, offset, curlwp->l_name, curlwp);
	} else
#endif
	{
		printf("WARNING: idr preload at %p leaked in lwp %s @ %p\n",
		    cache->ic_where, curlwp->l_name, curlwp);
	}
}

/*
 * idr_cache_dtor(cookie)
 *
 *	Specificdata destructor for an lwp's idr cache: warn about and
 *	free any leaked preloaded node, then free the cache itself.
 */
static void
idr_cache_dtor(void *cookie)
{
	struct idr_cache *cache = cookie;

	if (cache->ic_node) {
		idr_cache_warning(cache);
		kmem_free(cache->ic_node, sizeof(*cache->ic_node));
	}
	kmem_free(cache, sizeof(*cache));
}

/*
 * linux_idr_module_init()
 *
 *	Create the lwp-private key for the idr preload cache.
 */
int
linux_idr_module_init(void)
{
	int error;

	error = lwp_specific_key_create(&idr_cache_key, &idr_cache_dtor);
	if (error)
		return error;

	return 0;
}

/*
 * linux_idr_module_fini()
 *
 *	Delete the lwp-private key for the idr preload cache.
 */
void
linux_idr_module_fini(void)
{

	lwp_specific_key_delete(idr_cache_key);
}

static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

/*
 * idr_init(idr)
 *
 *	Initialize an idr.  Pair with idr_destroy.
 */
void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
}

/*
 * idr_destroy(idr)
 *
 *	Destroy an idr initialized with idr_init.
 */
void
idr_destroy(struct idr *idr)
{

#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

/*
 * idr_is_empty(idr)
 *
 *	Return true if the idr has no entries.
 */
bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

/*
 * idr_find(idr, id)
 *
 *	Return the datum associated with id, or NULL if there is none.
 */
void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

/*
 * idr_get_next(idr, idp)
 *
 *	Return the datum of the entry with the smallest id not below
 *	*idp, storing that id in *idp, or return NULL if there is none.
 */
void *
idr_get_next(struct idr *idr, int *idp)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		*idp = node->in_index;
	}
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
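
/*
 * Illustrative sketch (not part of this file): walking every entry
 * with idr_get_next, in the style of Linux's idr_for_each_entry
 * idiom.  "foo_idr" and struct foo are hypothetical caller-side
 * names.
 *
 *	struct foo *foo;
 *	int id;
 *
 *	for (id = 0; (foo = idr_get_next(&foo_idr, &id)) != NULL; id++) {
 *		... use foo, whose id is id ...
 *	}
 */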

/*
 * idr_replace(idr, replacement, id)
 *
 *	Replace the datum associated with id by replacement, returning
 *	the old datum, or ERR_PTR(-ENOENT) if id has no entry.
 */
void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

/*
 * idr_remove(idr, id)
 *
 *	Remove the entry for id, which must exist, and free its node.
 */
void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);

	kmem_free(node, sizeof(*node));
}
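
/*
 * Illustrative sketch (not part of this file): looking an object up
 * by id and dropping its id when the object goes away.  "foo_idr",
 * "id", struct foo, and foo_free are hypothetical caller-side names.
 *
 *	struct foo *foo;
 *
 *	foo = idr_find(&foo_idr, id);
 *	if (foo == NULL)
 *		return -ENOENT;
 *	... use foo ...
 *
 *	(later, when the object is destroyed)
 *	idr_remove(&foo_idr, id);
 *	foo_free(foo);
 */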

/*
 * idr_preload(gfp)
 *
 *	Preallocate a node in the current lwp's cache for a subsequent
 *	idr_alloc.  Pair each call with idr_preload_end.
 */
void
idr_preload(gfp_t gfp)
{
	struct idr_cache *cache;
	struct idr_node *node;
	km_flag_t kmflag = ISSET(gfp, __GFP_WAIT) ? KM_SLEEP : KM_NOSLEEP;

	/* If the caller asked to wait, we had better be sleepable.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	/*
	 * Get the current lwp's private idr cache, creating it zeroed
	 * on first use.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL) {
		/* lwp_setspecific must be sleepable.  */
		if (!ISSET(gfp, __GFP_WAIT))
			return;
		cache = kmem_zalloc(sizeof(*cache), kmflag);
		if (cache == NULL)
			return;
		lwp_setspecific(idr_cache_key, cache);
	}

	/*
	 * If there already is a node, a prior call to idr_preload must
	 * not have been matched by idr_preload_end.  Print a warning,
	 * claim the node, and record our return address for where this
	 * node came from so the next leak is attributed to us.
	 */
	if (cache->ic_node) {
		idr_cache_warning(cache);
		goto out;
	}

	/*
	 * No cached node.  Allocate a new one, store it in the cache,
	 * and record our return address for where this node came from
	 * so the next leak is attributed to us.
	 */
	node = kmem_alloc(sizeof(*node), kmflag);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	cache->ic_node = node;
out:	cache->ic_where = __builtin_return_address(0);
}

/*
 * idr_alloc(idr, data, start, end, gfp)
 *
 *	Allocate the lowest unused id in [start, end), or in [start,
 *	INT_MAX] if end <= 0, and associate data with it.  Return the
 *	id on success, -EINVAL if start is negative, -ENOSPC if the
 *	range is exhausted, or -ENOMEM if no node has been preloaded.
 *	Call between idr_preload and idr_preload_end.
 */
int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_cache *cache;
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/*
	 * Grab a node allocated by idr_preload, if we have a cache and
	 * it is populated.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL || cache->ic_node == NULL)
		return -ENOMEM;
	node = cache->ic_node;
	cache->ic_node = NULL;

	/* Find the lowest unused id at or above start.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	/* On failure, put the node back for idr_preload_end to free.  */
	if (id < 0)
		cache->ic_node = node;
	return id;
}

/*
 * idr_preload_end()
 *
 *	End a preload section begun with idr_preload, freeing any
 *	preloaded node that idr_alloc did not consume.
 */
void
idr_preload_end(void)
{
	struct idr_cache *cache;

	/* Get the cache, or bail if it's not there.  */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL)
		return;

	/*
	 * If there is a node, either because we didn't idr_alloc or
	 * because idr_alloc failed, chuck it.
	 *
	 * XXX If we are not sleepable, then while the caller may have
	 * used idr_preload(GFP_ATOMIC), kmem_free may still sleep.
	 * What to do?
	 */
	if (cache->ic_node) {
		struct idr_node *node;

		node = cache->ic_node;
		cache->ic_node = NULL;
		cache->ic_where = NULL;

		kmem_free(node, sizeof(*node));
	}
}
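
/*
 * Illustrative sketch (not part of this file): the preload/alloc
 * protocol as callers are expected to use it.  "foo_idr" and "foo"
 * are hypothetical caller-side names; locking and cleanup are
 * elided.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&foo_idr, foo, 1, 0, GFP_KERNEL);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;	(-ENOMEM or -ENOSPC)
 *	foo->f_id = id;
 */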

/*
 * idr_for_each(idr, proc, arg)
 *
 *	Call (*proc)(id, data, arg) for each entry in increasing id
 *	order, stopping at the first nonzero return value, which is
 *	then returned; return 0 if proc never objected.
 */
int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}
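
/*
 * Illustrative sketch (not part of this file): an idr_for_each
 * callback that prints every entry.  "foo_idr" and the callback name
 * are hypothetical.
 *
 *	static int
 *	foo_print(int id, void *data, void *arg __unused)
 *	{
 *
 *		printf("id %d -> %p\n", id, data);
 *		return 0;	(nonzero stops the walk and is returned)
 *	}
 *
 *	error = idr_for_each(&foo_idr, &foo_print, NULL);
 */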
    393