/*	$NetBSD: linux_idr.c,v 1.15 2021/12/19 12:21:02 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.15 2021/12/19 12:21:02 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

struct idr_node {
	rb_node_t		in_rb_node;	/* linkage in the id tree */
	int			in_index;	/* the id itself */
	void			*in_data;	/* caller's datum for this id */
};

struct idr_cache {
	struct idr_node		*ic_node;	/* node preallocated by idr_preload */
	void			*ic_where;	/* return address of the preloader */
};
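
/*
 * Preload cache: idr_preload stashes one preallocated idr_node in the
 * calling lwp's lwp-specific data, and a subsequent idr_alloc on the
 * same lwp consumes it, so the allocation itself can run under the
 * idr's spin lock without calling kmem_alloc.  idr_preload_end
 * releases an unconsumed node; if an lwp exits with a node still
 * cached, the destructor reports the leak at the recorded preload
 * site (ic_where).
 */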

SDT_PROBE_DEFINE0(sdt, linux, idr, leak);
SDT_PROBE_DEFINE1(sdt, linux, idr, init, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE1(sdt, linux, idr, destroy, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE4(sdt, linux, idr, replace,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*odata*/, "void *"/*ndata*/);
SDT_PROBE_DEFINE3(sdt, linux, idr, remove,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload__end);
SDT_PROBE_DEFINE3(sdt, linux, idr, alloc,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);

static specificdata_key_t idr_cache_key __read_mostly;

/*
 * Warn that the node cached by an lwp's idr_preload was never used or
 * released, identifying the preload site recorded in ic_where.
 */
static void
idr_cache_warning(struct idr_cache *cache)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;
#endif

	KASSERT(cache->ic_node != NULL);

#ifdef DDB
	db_find_sym_and_offset((db_addr_t)(uintptr_t)cache->ic_where,
	    &name, &offset);
	if (name) {
		printf("WARNING: idr preload at %s+%#"DDB_EXPR_FMT"x"
		    " leaked in lwp %s @ %p\n",
		    name, offset, curlwp->l_name, curlwp);
	} else
#endif
	{
		printf("WARNING: idr preload at %p leaked in lwp %s @ %p\n",
		    cache->ic_where, curlwp->l_name, curlwp);
	}
}

/*
 * lwp-specific data destructor: report and free any node still cached
 * when the lwp exits, then free the cache itself.
 */
static void
idr_cache_dtor(void *cookie)
{
	struct idr_cache *cache = cookie;

	if (cache->ic_node) {
		SDT_PROBE0(sdt, linux, idr, leak);
		idr_cache_warning(cache);
		kmem_free(cache->ic_node, sizeof(*cache->ic_node));
	}
	kmem_free(cache, sizeof(*cache));
}

int
linux_idr_module_init(void)
{
	int error;

	error = lwp_specific_key_create(&idr_cache_key, &idr_cache_dtor);
	if (error)
		return error;

	return 0;
}

void
linux_idr_module_fini(void)
{

	lwp_specific_key_delete(idr_cache_key);
}

static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

void
idr_init(struct idr *idr)
{

	idr_init_base(idr, 0);
}

void
idr_init_base(struct idr *idr, int base)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
	idr->idr_base = base;

	SDT_PROBE1(sdt, linux, idr, init,  idr);
}

void
idr_destroy(struct idr *idr)
{

	SDT_PROBE1(sdt, linux, idr, destroy,  idr);
#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}
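
/*
 * Note that idr_destroy does not free outstanding idr_nodes: callers
 * are expected to idr_remove every id first, or the nodes (and
 * whatever their data point to) are leaked.
 */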

bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

void *
idr_get_next(struct idr *idr, int *idp)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		*idp = node->in_index;
	}
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
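
/*
 * A minimal iteration sketch using idr_get_next (hypothetical caller
 * code, not part of this file):
 *
 *	int id = 0;
 *	void *data;
 *
 *	for (data = idr_get_next(idr, &id); data != NULL;
 *	     id++, data = idr_get_next(idr, &id))
 *		...use id and data...
 *
 * idr_get_next rounds *idp up to the next allocated id, so bumping id
 * past each hit visits every entry once, in increasing id order.
 */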

void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
		SDT_PROBE4(sdt, linux, idr, replace,
		    idr, id, result, replacement);
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

void *
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		SDT_PROBE3(sdt, linux, idr, remove,  idr, id, data);
		rb_tree_remove_node(&idr->idr_tree, node);
	}
	mutex_spin_exit(&idr->idr_lock);

	/* Free the node outside the spin lock, if there was one.  */
	if (node != NULL)
		kmem_free(node, sizeof(*node));

	return data;
}

void
idr_preload(gfp_t gfp)
{
	struct idr_cache *cache;
	struct idr_node *node;
	km_flag_t kmflag = ISSET(gfp, __GFP_WAIT) ? KM_SLEEP : KM_NOSLEEP;

	SDT_PROBE0(sdt, linux, idr, preload);

	/* If caller asked to wait, we had better be sleepable.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	/*
	 * Get the current lwp's private idr cache.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL) {
		/* lwp_setspecific must be sleepable.  */
		if (!ISSET(gfp, __GFP_WAIT))
			return;
		cache = kmem_zalloc(sizeof(*cache), kmflag);
		if (cache == NULL)
			return;
		lwp_setspecific(idr_cache_key, cache);
	}

	/*
	 * If there already is a node, a prior call to idr_preload must
	 * not have been matched by idr_preload_end.  Print a warning,
	 * claim the node, and record our return address for where this
	 * node came from so the next leak is attributed to us.
	 */
	if (cache->ic_node) {
		idr_cache_warning(cache);
		goto out;
	}

	/*
	 * No cached node.  Allocate a new one, store it in the cache,
	 * and record our return address for where this node came from
	 * so the next leak is attributed to us.
	 */
	node = kmem_alloc(sizeof(*node), kmflag);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	cache->ic_node = node;
out:	cache->ic_where = __builtin_return_address(0);
}

int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_cache *cache;
	struct idr_node *node, *search, *collision __diagused;
	int id;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;

	/* Ids never fall below the base passed to idr_init_base.  */
	start = MAX(start, idr->idr_base);
	if (__predict_false(maximum < start))
		return -ENOSPC;
	id = start;

	/*
	 * Grab a node allocated by idr_preload, if we have a cache and
	 * it is populated.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL || cache->ic_node == NULL)
		return -ENOMEM;
	node = cache->ic_node;
	cache->ic_node = NULL;

	/* Find the first free id at or above start.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	/* On failure, return the node to the cache for reuse.  */
	if (id < 0) {
		cache->ic_node = node;
	} else {
		SDT_PROBE3(sdt, linux, idr, alloc,  idr, id, data);
	}
	return id;
}
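
/*
 * Sketch of the expected calling sequence, mirroring the Linux idr
 * API (idr and data stand for the caller's own objects):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(idr, data, 0, 0, GFP_ATOMIC);
 *	idr_preload_end();
 *	if (id < 0)
 *		...handle failure...
 *
 * idr_preload charges the lwp's cache so that idr_alloc, which runs
 * under a spin lock, never has to allocate memory itself.
 */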

void
idr_preload_end(void)
{
	struct idr_cache *cache;

	SDT_PROBE0(sdt, linux, idr, preload__end);

	/* Get the cache, or bail if it's not there.  */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL)
		return;

	/*
	 * If there is a node, either because we didn't idr_alloc or
	 * because idr_alloc failed, chuck it.
	 *
	 * XXX If we are not sleepable, then while the caller may have
	 * used idr_preload(GFP_ATOMIC), kmem_free may still sleep.
	 * What to do?
	 */
	if (cache->ic_node) {
		struct idr_node *node;

		node = cache->ic_node;
		cache->ic_node = NULL;
		cache->ic_where = NULL;

		kmem_free(node, sizeof(*node));
	}
}

int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}
    434