/*	$NetBSD: linux_idr.c,v 1.3.4.2 2014/08/20 00:04:22 tls Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.3.4.2 2014/08/20 00:04:22 tls Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/rbtree.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

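/*
 * An idr maps small nonnegative integer ids to pointers.  Each mapping
 * is kept as an idr_node in a red-black tree keyed by in_index; the
 * in_list entry is used only while a spare node sits on one of the
 * global preload/discard queues below.
 */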
struct idr_node {
	rb_node_t		in_rb_node;
	int			in_index;
	void			*in_data;
	SIMPLEQ_ENTRY(idr_node)	in_list;
};
SIMPLEQ_HEAD(idr_head, idr_node);

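/*
 * Global cache of spare nodes: idr_preload stocks preloaded_nodes, a
 * failed idr_alloc moves its node to discarded_nodes, and
 * idr_preload_end frees whatever was discarded.
 */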
static struct {
	kmutex_t	lock;
	struct idr_head	preloaded_nodes;
	struct idr_head	discarded_nodes;
} idr_cache __cacheline_aligned;

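/*
 * Module initialization: set up the global node cache and its lock.
 */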
int
linux_idr_module_init(void)
{

	mutex_init(&idr_cache.lock, MUTEX_DEFAULT, IPL_VM);
	SIMPLEQ_INIT(&idr_cache.preloaded_nodes);
	SIMPLEQ_INIT(&idr_cache.discarded_nodes);
	return 0;
}

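/*
 * Module teardown: both queues must already be empty, i.e. every
 * idr_preload must have been balanced by idr_preload_end.
 */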
void
linux_idr_module_fini(void)
{

	KASSERT(SIMPLEQ_EMPTY(&idr_cache.discarded_nodes));
	KASSERT(SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes));
	mutex_destroy(&idr_cache.lock);
}

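/*
 * rb_tree(3) glue: order idr_nodes by their integer index and allow
 * lookup by a bare int key.
 */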
static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

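/*
 * Initialize an idr: an empty rb tree protected by a spin mutex at
 * IPL_VM.
 */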
void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
}

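/*
 * Destroy an idr.  Only the lock is destroyed (see the XXX below); the
 * caller is expected to have removed all entries first.
 */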
void
idr_destroy(struct idr *idr)
{

#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

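/*
 * Return true iff the idr has no entries.  Note that the tree is read
 * without taking the idr lock.
 */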
bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

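/*
 * Look up the data associated with id, or return NULL if there is no
 * such entry.
 */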
void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

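/*
 * Replace the data stored under id and return the old value, or
 * ERR_PTR(-ENOENT) if id is not present.
 */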
void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

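/*
 * Remove the entry for id and free its node.  The entry must exist;
 * the data itself is the caller's responsibility.
 */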
void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);
	kfree(node);
}

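/*
 * Preallocate a node and stash it on the global preload queue so that
 * a subsequent idr_alloc need not allocate under the idr's spin lock.
 * Allocation failure is silent here; idr_alloc asserts that a
 * preloaded node is available.
 */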
void
idr_preload(gfp_t gfp)
{
	struct idr_node *node;

	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	node = kmalloc(sizeof(*node), gfp);
	if (node == NULL)
		return;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_INSERT_TAIL(&idr_cache.preloaded_nodes, node, in_list);
	mutex_spin_exit(&idr_cache.lock);
}

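/*
 * Allocate an unused id in [start, end) -- or [start, INT_MAX] when
 * end <= 0 -- bind it to data, and return it.  Returns -EINVAL for a
 * negative start and -ENOSPC if the range is exhausted.  The caller
 * must bracket the call with idr_preload/idr_preload_end, which supply
 * the node consumed (or discarded) here.  A minimal usage sketch,
 * assuming a GFP_KERNEL-style sleepable allocation and placeholder
 * names idr/obj:
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */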
int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/* Grab a node allocated by idr_preload.  */
	mutex_spin_enter(&idr_cache.lock);
	KASSERTMSG(!SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes),
	    "missing call to idr_preload");
	node = SIMPLEQ_FIRST(&idr_cache.preloaded_nodes);
	SIMPLEQ_REMOVE_HEAD(&idr_cache.preloaded_nodes, in_list);
	mutex_spin_exit(&idr_cache.lock);

	/* Find an id.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	if (id < 0) {
		/* Discard the node on failure.  */
		mutex_spin_enter(&idr_cache.lock);
		SIMPLEQ_INSERT_HEAD(&idr_cache.discarded_nodes, node, in_list);
		mutex_spin_exit(&idr_cache.lock);
	}
	return id;
}

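/*
 * End a preload section: free any nodes discarded by idr_alloc.  The
 * discarded queue is drained onto a local list under the cache lock so
 * that kfree runs with no spin lock held.
 */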
void
idr_preload_end(void)
{
	struct idr_head temp = SIMPLEQ_HEAD_INITIALIZER(temp);
	struct idr_node *node, *next;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_FOREACH_SAFE(node, &idr_cache.discarded_nodes, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&idr_cache.discarded_nodes, in_list);
		SIMPLEQ_INSERT_HEAD(&temp, node, in_list);
	}
	mutex_spin_exit(&idr_cache.lock);

	SIMPLEQ_FOREACH_SAFE(node, &temp, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&temp, in_list);
		kfree(node);
	}
}

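/*
 * Call proc(id, data, arg) for each entry in ascending id order, and
 * stop at the first nonzero return value, which is passed back to the
 * caller.  The tree is walked without the idr lock, so the caller must
 * exclude concurrent modification.
 */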
int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}