/*	$NetBSD: linux_idr.c,v 1.9 2018/08/27 14:15:45 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.9 2018/08/27 14:15:45 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/rbtree.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

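/*
 * Linux idr API: a map from nonnegative integer ids to pointers,
 * implemented here with a red/black tree (<sys/rbtree.h>) under a
 * spin lock.  The intended call pattern for allocating an id, shown
 * as a rough sketch with a hypothetical idr and datum rather than a
 * real caller, is:
 *
 *	idr_preload(GFP_KERNEL);	(may sleep; stashes a node per lwp)
 *	... take the caller's own lock ...
 *	id = idr_alloc(idr, datum, 1, 0, GFP_ATOMIC);
 *	... drop the caller's own lock ...
 *	idr_preload_end();		(frees the stashed node if unused)
 */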
struct idr_node {
	rb_node_t	in_rb_node;
	int		in_index;
	void		*in_data;
};

struct idr_cache {
	struct idr_node	*ic_node;
	void		*ic_where;
};

static specificdata_key_t idr_cache_key __read_mostly;

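/*
 * idr_cache_warning(cache)
 *
 *	Print a warning that the preloaded node in cache was never
 *	consumed, attributing it to the return address recorded by the
 *	idr_preload call that created it (symbolically, if DDB is
 *	configured).
 */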
static void
idr_cache_warning(struct idr_cache *cache)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;
#endif

	KASSERT(cache->ic_node != NULL);

#ifdef DDB
	db_find_sym_and_offset((db_addr_t)(uintptr_t)cache->ic_where,
	    &name, &offset);
	if (name) {
		printf("WARNING: idr preload at %s+%#"DDB_EXPR_FMT"x"
		    " leaked in lwp %s @ %p\n",
		    name, offset, curlwp->l_name, curlwp);
	} else
#endif
	{
		printf("WARNING: idr preload at %p leaked in lwp %s @ %p\n",
		    cache->ic_where, curlwp->l_name, curlwp);
	}
}

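/*
 * idr_cache_dtor(cookie)
 *
 *	lwp-specific-data destructor: warn about and free any node
 *	still left in the lwp's idr cache, then free the cache itself.
 */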
static void
idr_cache_dtor(void *cookie)
{
	struct idr_cache *cache = cookie;

	if (cache->ic_node) {
		idr_cache_warning(cache);
		kmem_free(cache->ic_node, sizeof(*cache->ic_node));
	}
	kmem_free(cache, sizeof(*cache));
}

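/*
 * linux_idr_module_init()
 *
 *	Create the lwp-specific-data key used for the per-lwp idr
 *	preload cache.  Return 0 on success, error code on failure.
 */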
int
linux_idr_module_init(void)
{
	int error;

	error = lwp_specific_key_create(&idr_cache_key, &idr_cache_dtor);
	if (error)
		return error;

	return 0;
}

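/*
 * linux_idr_module_fini()
 *
 *	Tear down the lwp-specific-data key created by
 *	linux_idr_module_init.
 */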
void
linux_idr_module_fini(void)
{

	lwp_specific_key_delete(idr_cache_key);
}

static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

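/*
 * Red/black tree comparators: order two idr nodes, or an idr node and
 * a bare key, by their integer id.
 */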
static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

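/*
 * idr_init(idr)
 *
 *	Initialize idr as an empty map, with a spin lock at IPL_VM
 *	protecting the underlying red/black tree.
 */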
void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
}

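/*
 * idr_destroy(idr)
 *
 *	Tear down an idr.  The caller is presumably expected to have
 *	removed all entries first; nothing here frees leftover nodes.
 */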
void
idr_destroy(struct idr *idr)
{

#if 0 /* XXX No rb_tree_destroy? */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

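/*
 * idr_is_empty(idr)
 *
 *	True if idr has no entries.  Note that this peeks at the tree
 *	without taking the lock.
 */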
bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

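/*
 * idr_find(idr, id)
 *
 *	Return the datum stored under id, or NULL if there is none.
 */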
void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

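/*
 * idr_get_next(idr, idp)
 *
 *	Return the datum of the first entry whose id is >= *idp, and
 *	store that entry's id in *idp; return NULL if there is none.
 */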
void *
idr_get_next(struct idr *idr, int *idp)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		*idp = node->in_index;
	}
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

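/*
 * idr_replace(idr, replacement, id)
 *
 *	Replace the datum stored under id with replacement and return
 *	the old datum, or return ERR_PTR(-ENOENT) if id is not present.
 */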
void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

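/*
 * idr_remove(idr, id)
 *
 *	Remove the entry for id and free its node.  The entry must
 *	exist; otherwise the KASSERTMSG fires.
 */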
void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);

	kmem_free(node, sizeof(*node));
}

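/*
 * idr_preload(gfp)
 *
 *	Preallocate an idr node and stash it in the current lwp's
 *	private cache so that a subsequent idr_alloc can run without
 *	allocating.  With __GFP_WAIT the allocation may sleep; without
 *	it, this may silently fail to preload, in which case idr_alloc
 *	will return -ENOMEM.
 */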
void
idr_preload(gfp_t gfp)
{
	struct idr_cache *cache;
	struct idr_node *node;
	km_flag_t kmflag = ISSET(gfp, __GFP_WAIT) ? KM_SLEEP : KM_NOSLEEP;

	/* If caller asked to wait, we had better be sleepable.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	/*
	 * Get the current lwp's private idr cache.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL) {
		/* lwp_setspecific must be sleepable.  */
		if (!ISSET(gfp, __GFP_WAIT))
			return;
		cache = kmem_zalloc(sizeof(*cache), kmflag);
		if (cache == NULL)
			return;
		lwp_setspecific(idr_cache_key, cache);
	}

	/*
	 * If there already is a node, a prior call to idr_preload must
	 * not have been matched by idr_preload_end.  Print a warning,
	 * claim the node, and record our return address for where this
	 * node came from so the next leak is attributed to us.
	 */
	if (cache->ic_node) {
		idr_cache_warning(cache);
		goto out;
	}

	/*
	 * No cached node.  Allocate a new one, store it in the cache,
	 * and record our return address for where this node came from
	 * so the next leak is attributed to us.
	 */
	node = kmem_alloc(sizeof(*node), kmflag);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	cache->ic_node = node;
out:	cache->ic_where = __builtin_return_address(0);
}

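/*
 * idr_alloc(idr, data, start, end, gfp)
 *
 *	Allocate the lowest free id in [start, end) (or [start, INT_MAX]
 *	if end <= 0), bind it to data, and return it; return -EINVAL,
 *	-ENOSPC, or -ENOMEM on failure.  The node comes from the per-lwp
 *	cache, so the caller must have called idr_preload first.
 */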
int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_cache *cache;
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/*
	 * Grab a node allocated by idr_preload, if we have a cache and
	 * it is populated.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL || cache->ic_node == NULL)
		return -ENOMEM;
	node = cache->ic_node;
	cache->ic_node = NULL;

	/* Find an id.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	/* On failure, put the node back in the cache for idr_preload_end.  */
	if (id < 0)
		cache->ic_node = node;
	return id;
}

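/*
 * idr_preload_end()
 *
 *	Release the node preloaded by idr_preload, if idr_alloc did not
 *	consume it (or gave it back on failure).
 */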
void
idr_preload_end(void)
{
	struct idr_cache *cache;

	/* Get the cache, or bail if it's not there.  */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL)
		return;

	/*
	 * If there is a node, either because we didn't idr_alloc or
	 * because idr_alloc failed, chuck it.
	 *
	 * XXX If we are not sleepable, then while the caller may have
	 * used idr_preload(GFP_ATOMIC), kmem_free may still sleep.
	 * What to do?
	 */
	if (cache->ic_node) {
		struct idr_node *node;

		node = cache->ic_node;
		cache->ic_node = NULL;
		cache->ic_where = NULL;

		kmem_free(node, sizeof(*node));
	}
}

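/*
 * idr_for_each(idr, proc, arg)
 *
 *	Call proc(id, data, arg) for each entry, in ascending id order,
 *	stopping early if proc returns nonzero; return that value, or 0
 *	after visiting everything.  Iteration is unlocked, so the caller
 *	must prevent concurrent modification.
 */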
int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}