/*	$NetBSD: linux_idr.c,v 1.10 2018/08/27 15:06:54 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.10 2018/08/27 15:06:54 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

struct idr_node {
	rb_node_t	in_rb_node;
	int		in_index;
	void		*in_data;
};

struct idr_cache {
	struct idr_node	*ic_node;
	void		*ic_where;
};
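
/*
 * This implementation stores the id-to-pointer map in a red-black
 * tree keyed by id, rather than the radix-tree-based structure Linux
 * itself uses; the interface is the same.  Because idr_alloc may be
 * called in contexts that cannot sleep, the tree node for a new entry
 * is allocated up front by idr_preload and parked in a per-lwp
 * idr_cache; ic_where records idr_preload's caller so that a leaked
 * preload can be attributed in idr_cache_warning.
 */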

SDT_PROBE_DEFINE0(sdt, linux, idr, leak);
SDT_PROBE_DEFINE1(sdt, linux, idr, init, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE1(sdt, linux, idr, destroy, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE4(sdt, linux, idr, replace,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*odata*/, "void *"/*ndata*/);
SDT_PROBE_DEFINE3(sdt, linux, idr, remove,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload_end);
SDT_PROBE_DEFINE3(sdt, linux, idr, alloc,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);

static specificdata_key_t idr_cache_key __read_mostly;

static void
idr_cache_warning(struct idr_cache *cache)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;
#endif

	KASSERT(cache->ic_node != NULL);

#ifdef DDB
	db_find_sym_and_offset((db_addr_t)(uintptr_t)cache->ic_where,
	    &name, &offset);
	if (name) {
		printf("WARNING: idr preload at %s+%#"DDB_EXPR_FMT"x"
		    " leaked in lwp %s @ %p\n",
		    name, offset, curlwp->l_name, curlwp);
	} else
#endif
	{
		printf("WARNING: idr preload at %p leaked in lwp %s @ %p\n",
		    cache->ic_where, curlwp->l_name, curlwp);
	}
}

static void
idr_cache_dtor(void *cookie)
{
	struct idr_cache *cache = cookie;

	if (cache->ic_node) {
		SDT_PROBE0(sdt, linux, idr, leak);
		idr_cache_warning(cache);
		kmem_free(cache->ic_node, sizeof(*cache->ic_node));
	}
	kmem_free(cache, sizeof(*cache));
}

int
linux_idr_module_init(void)
{
	int error;

	error = lwp_specific_key_create(&idr_cache_key, &idr_cache_dtor);
	if (error)
		return error;

	return 0;
}

void
linux_idr_module_fini(void)
{

	lwp_specific_key_delete(idr_cache_key);
}

static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
	SDT_PROBE1(sdt, linux, idr, init, idr);
}

void
idr_destroy(struct idr *idr)
{

	SDT_PROBE1(sdt, linux, idr, destroy, idr);
#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}

void *
idr_get_next(struct idr *idr, int *idp)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		*idp = node->in_index;
	}
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
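
/*
 * Example: iterate over all entries, in the style of Linux's
 * idr_for_each_entry (a sketch; `idr' and `data' are hypothetical
 * locals):
 *
 *	int id;
 *	void *data;
 *
 *	for (id = 0; (data = idr_get_next(&idr, &id)) != NULL; id++) {
 *		...use id and data...
 *	}
 */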

void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
		SDT_PROBE4(sdt, linux, idr, replace,
		    idr, id, result, replacement);
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	SDT_PROBE3(sdt, linux, idr, remove, idr, id, node->in_data);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);

	kmem_free(node, sizeof(*node));
}

void
idr_preload(gfp_t gfp)
{
	struct idr_cache *cache;
	struct idr_node *node;
	km_flag_t kmflag = ISSET(gfp, __GFP_WAIT) ? KM_SLEEP : KM_NOSLEEP;

	SDT_PROBE0(sdt, linux, idr, preload);

	/* If caller asked to wait, we had better be sleepable.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	/*
	 * Get the current lwp's private idr cache.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL) {
		/* lwp_setspecific may sleep, so bail if we may not.  */
		if (!ISSET(gfp, __GFP_WAIT))
			return;
		cache = kmem_zalloc(sizeof(*cache), kmflag);
		if (cache == NULL)
			return;
		lwp_setspecific(idr_cache_key, cache);
	}

	/*
	 * If there already is a node, a prior call to idr_preload must
	 * not have been matched by idr_preload_end.  Print a warning,
	 * claim the node, and record our return address for where this
	 * node came from so the next leak is attributed to us.
	 */
	if (cache->ic_node) {
		idr_cache_warning(cache);
		goto out;
	}

	/*
	 * No cached node.  Allocate a new one, store it in the cache,
	 * and record our return address for where this node came from
	 * so the next leak is attributed to us.
	 */
	node = kmem_alloc(sizeof(*node), kmflag);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	cache->ic_node = node;
out:	cache->ic_where = __builtin_return_address(0);
}

int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_cache *cache;
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/*
	 * Grab a node allocated by idr_preload, if we have a cache and
	 * it is populated.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL || cache->ic_node == NULL)
		return -ENOMEM;
	node = cache->ic_node;
	cache->ic_node = NULL;

	/* Find an id.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	/* Discard the node on failure.  */
	if (id < 0)
		cache->ic_node = node;
	else
		SDT_PROBE3(sdt, linux, idr, alloc, idr, id, data);
	return id;
}

void
idr_preload_end(void)
{
	struct idr_cache *cache;

	SDT_PROBE0(sdt, linux, idr, preload_end);

	/* Get the cache, or bail if it's not there.  */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL)
		return;

	/*
	 * If a node is still cached, either because idr_alloc was
	 * never called or because it failed, chuck it.
	 *
	 * XXX If we are not sleepable, then while the caller may have
	 * used idr_preload(GFP_ATOMIC), kmem_free may still sleep.
	 * What to do?
	 */
	if (cache->ic_node) {
		struct idr_node *node;

		node = cache->ic_node;
		cache->ic_node = NULL;
		cache->ic_where = NULL;

		kmem_free(node, sizeof(*node));
	}
}
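
/*
 * The preload protocol above follows Linux: idr_preload allocates the
 * tree node up front, so that idr_alloc itself never sleeps, and
 * idr_preload_end frees whatever was left unused.  A typical caller
 * looks like this (a sketch; `table' and `obj' are hypothetical):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&table, obj, 1, 0, GFP_ATOMIC);
 *	idr_preload_end();
 *	if (id < 0)
 *		...handle failure...
 *
 * Passing end = 0 means `no maximum': ids are drawn from [start,
 * INT_MAX].
 */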

int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}
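
/*
 * Example: count the entries with idr_for_each (a sketch; `count_cb'
 * and its caller are hypothetical):
 *
 *	static int
 *	count_cb(int id, void *data, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	(returning nonzero stops the walk)
 *	}
 *
 *	int n = 0;
 *	idr_for_each(&idr, &count_cb, &n);
 */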