npf_tableset.c revision 1.4 1 1.4 rmind /* $NetBSD: npf_tableset.c,v 1.4 2010/12/18 01:07:25 rmind Exp $ */
2 1.1 rmind
3 1.1 rmind /*-
4 1.1 rmind * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
5 1.1 rmind * All rights reserved.
6 1.1 rmind *
7 1.1 rmind * This material is based upon work partially supported by The
8 1.1 rmind * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 1.1 rmind *
10 1.1 rmind * Redistribution and use in source and binary forms, with or without
11 1.1 rmind * modification, are permitted provided that the following conditions
12 1.1 rmind * are met:
13 1.1 rmind * 1. Redistributions of source code must retain the above copyright
14 1.1 rmind * notice, this list of conditions and the following disclaimer.
15 1.1 rmind * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 rmind * notice, this list of conditions and the following disclaimer in the
17 1.1 rmind * documentation and/or other materials provided with the distribution.
18 1.1 rmind *
19 1.1 rmind * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 rmind * POSSIBILITY OF SUCH DAMAGE.
30 1.1 rmind */
31 1.1 rmind
32 1.1 rmind /*
33 1.4 rmind * NPF tableset module.
34 1.1 rmind *
35 1.1 rmind * TODO:
36 1.1 rmind * - Currently, code is modeled to handle IPv4 CIDR blocks.
37 1.1 rmind * - Dynamic hash growing/shrinking (i.e. re-hash functionality), maybe?
38 1.1 rmind * - Dynamic array resize.
39 1.1 rmind */
40 1.1 rmind
41 1.1 rmind #include <sys/cdefs.h>
42 1.4 rmind __KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.4 2010/12/18 01:07:25 rmind Exp $");
43 1.1 rmind
44 1.1 rmind #include <sys/param.h>
45 1.1 rmind #include <sys/kernel.h>
46 1.1 rmind
47 1.1 rmind #include <sys/atomic.h>
48 1.1 rmind #include <sys/hash.h>
49 1.1 rmind #include <sys/kmem.h>
50 1.1 rmind #include <sys/pool.h>
51 1.1 rmind #include <sys/queue.h>
52 1.1 rmind #include <sys/rwlock.h>
53 1.1 rmind #include <sys/systm.h>
54 1.1 rmind #include <sys/types.h>
55 1.1 rmind
56 1.1 rmind #include "npf_impl.h"
57 1.1 rmind
/*
 * Table entry structure.
 *
 * An entry lives in exactly one storage type (hash list or RB-tree),
 * so the linkage fields share a union.
 */
struct npf_tblent {
	/* Hash or tree linkage, depending on the owning table's type. */
	union {
		LIST_ENTRY(npf_tblent)	hashq;
		rb_node_t		rbnode;
	} te_entry;
	/* IPv4 CIDR block: network address and netmask. */
	in_addr_t		te_addr;
	in_addr_t		te_mask;
};

/* Head type for the hash buckets. */
LIST_HEAD(npf_hashl, npf_tblent);
71 1.1 rmind
/* Table structure. */
struct npf_table {
	char			t_name[16];
	/* Lock and reference count. */
	krwlock_t		t_lock;
	u_int			t_refcnt;
	/* Table ID (index into the tableset array). */
	u_int			t_id;
	/* The storage type can be: 1. Hash 2. RB-tree. */
	u_int			t_type;
	/* Hash storage: bucket array and mask (used when t_type is hash). */
	struct npf_hashl *	t_hashl;
	u_long			t_hashmask;
	/* Tree storage (used when t_type is RB-tree). */
	rb_tree_t		t_rbtree;
};

/* Pool cache for table entries, shared by all tables. */
static pool_cache_t		tblent_cache	__read_mostly;
88 1.1 rmind
/*
 * npf_tableset_sysinit: initialise tableset structures (the shared
 * pool cache for table entries).
 */
void
npf_tableset_sysinit(void)
{

	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftenpl", NULL, IPL_NONE, NULL, NULL, NULL);
}
99 1.1 rmind
/*
 * npf_tableset_sysfini: destroy the table entry pool cache.
 *
 * => All tablesets must have been destroyed beforehand, otherwise the
 *    pool cache would still contain live entries.
 */
void
npf_tableset_sysfini(void)
{

	pool_cache_destroy(tblent_cache);
}
106 1.1 rmind
/*
 * npf_tableset_create: allocate an empty tableset - a zeroed array of
 * NPF_TABLE_SLOTS table pointers, indexed by table ID.
 */
npf_tableset_t *
npf_tableset_create(void)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);

	return kmem_zalloc(sz, KM_SLEEP);
}
114 1.1 rmind
115 1.1 rmind void
116 1.1 rmind npf_tableset_destroy(npf_tableset_t *tblset)
117 1.1 rmind {
118 1.1 rmind const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);
119 1.1 rmind npf_table_t *t;
120 1.1 rmind u_int tid;
121 1.1 rmind
122 1.1 rmind /*
123 1.1 rmind * Destroy all tables (no references should be held, as ruleset
124 1.1 rmind * should be destroyed before).
125 1.1 rmind */
126 1.1 rmind for (tid = 0; tid < NPF_TABLE_SLOTS; tid++) {
127 1.1 rmind t = tblset[tid];
128 1.1 rmind if (t != NULL) {
129 1.1 rmind npf_table_destroy(t);
130 1.1 rmind }
131 1.1 rmind }
132 1.1 rmind kmem_free(tblset, sz);
133 1.1 rmind }
134 1.1 rmind
135 1.1 rmind /*
136 1.1 rmind * npf_tableset_insert: insert the table into the specified tableset.
137 1.1 rmind *
138 1.1 rmind * => Returns 0 on success, fails and returns errno if ID is already used.
139 1.1 rmind */
140 1.1 rmind int
141 1.1 rmind npf_tableset_insert(npf_tableset_t *tblset, npf_table_t *t)
142 1.1 rmind {
143 1.1 rmind const u_int tid = t->t_id;
144 1.1 rmind int error;
145 1.1 rmind
146 1.1 rmind KASSERT((u_int)tid < NPF_TABLE_SLOTS);
147 1.1 rmind
148 1.1 rmind if (tblset[tid] == NULL) {
149 1.1 rmind tblset[tid] = t;
150 1.1 rmind error = 0;
151 1.1 rmind } else {
152 1.1 rmind error = EEXIST;
153 1.1 rmind }
154 1.1 rmind return error;
155 1.1 rmind }
156 1.1 rmind
157 1.1 rmind /*
158 1.1 rmind * Red-black tree storage.
159 1.1 rmind */
160 1.1 rmind
161 1.1 rmind static signed int
162 1.2 rmind table_rbtree_cmp_nodes(void *ctx, const void *n1, const void *n2)
163 1.1 rmind {
164 1.2 rmind const npf_tblent_t * const te1 = n1;
165 1.2 rmind const npf_tblent_t * const te2 = n2;
166 1.1 rmind const in_addr_t x = te1->te_addr & te1->te_mask;
167 1.1 rmind const in_addr_t y = te2->te_addr & te2->te_mask;
168 1.1 rmind
169 1.1 rmind if (x < y)
170 1.2 rmind return -1;
171 1.2 rmind if (x > y)
172 1.1 rmind return 1;
173 1.1 rmind return 0;
174 1.1 rmind }
175 1.1 rmind
176 1.1 rmind static signed int
177 1.2 rmind table_rbtree_cmp_key(void *ctx, const void *n1, const void *key)
178 1.1 rmind {
179 1.2 rmind const npf_tblent_t * const te = n1;
180 1.1 rmind const in_addr_t x = te->te_addr & te->te_mask;
181 1.1 rmind const in_addr_t y = *(const in_addr_t *)key;
182 1.1 rmind
183 1.1 rmind if (x < y)
184 1.2 rmind return -1;
185 1.2 rmind if (x > y)
186 1.1 rmind return 1;
187 1.1 rmind return 0;
188 1.1 rmind }
189 1.1 rmind
/* RB-tree operations: the node linkage is embedded in the table entry. */
static const rb_tree_ops_t table_rbtree_ops = {
	.rbto_compare_nodes = table_rbtree_cmp_nodes,
	.rbto_compare_key = table_rbtree_cmp_key,
	.rbto_node_offset = offsetof(npf_tblent_t, te_entry.rbnode),
	.rbto_context = NULL
};
196 1.1 rmind
/*
 * Hash helper routine.
 */

/*
 * table_hash_bucket: hash the buffer (callers pass the masked address)
 * and return the matching bucket of the table's hash array.
 */
static inline struct npf_hashl *
table_hash_bucket(npf_table_t *t, void *buf, size_t sz)
{
	const uint32_t hidx = hash32_buf(buf, sz, HASH32_BUF_INIT);

	return &t->t_hashl[hidx & t->t_hashmask];
}
208 1.1 rmind
/*
 * npf_table_create: create table with a specified ID.
 *
 * => The ID must be in range (asserted); hsize is only used for the
 *    hash storage type.
 * => Returns the new table with one reference held, or NULL if the
 *    hash array could not be allocated.
 */
npf_table_t *
npf_table_create(u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	switch (type) {
	case NPF_TABLE_RBTREE:
		rb_tree_init(&t->t_rbtree, &table_rbtree_ops);
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		/* Callers are expected to validate the type beforehand. */
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_refcnt = 1;
	t->t_id = tid;
	return t;
}
240 1.1 rmind
/*
 * npf_table_destroy: free all table entries and table itself.
 *
 * => Caller must own the last (or only) reference.
 */
void
npf_table_destroy(npf_table_t *t)
{
	npf_tblent_t *e;
	u_int n;

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Drain every hash bucket, then release the bucket array. */
		for (n = 0; n <= t->t_hashmask; n++) {
			while ((e = LIST_FIRST(&t->t_hashl[n])) != NULL) {
				LIST_REMOVE(e, te_entry.hashq);
				pool_cache_put(tblent_cache, e);
			}
		}
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_RBTREE:
		/* Repeatedly remove the leftmost node until empty. */
		while ((e = rb_tree_iterate(&t->t_rbtree, NULL,
		    RB_DIR_LEFT)) != NULL) {
			rb_tree_remove_node(&t->t_rbtree, e);
			pool_cache_put(tblent_cache, e);
		}
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}
273 1.1 rmind
/*
 * npf_table_ref: holds the reference on table.
 *
 * => Table must be locked (reader or writer).
 */
void
npf_table_ref(npf_table_t *t)
{

	KASSERT(rw_lock_held(&t->t_lock));
	atomic_inc_uint(&t->t_refcnt);
}
286 1.1 rmind
/*
 * npf_table_unref: drop reference from the table and destroy the table if
 * it is the last reference.
 *
 * => Must not be called with the table lock held: destruction tears
 *    down the lock itself.
 */
void
npf_table_unref(npf_table_t *t)
{

	if (atomic_dec_uint_nv(&t->t_refcnt) != 0) {
		return;
	}
	npf_table_destroy(t);
}
300 1.1 rmind
/*
 * npf_table_get: find the table according to ID and "get it" by locking it.
 *
 * => If tset is NULL, the active (core) tableset is used, under the NPF
 *    core lock, which is released before returning.
 * => On success the table is returned locked as reader; release it with
 *    npf_table_put().  Returns NULL if the ID is invalid or unused.
 */
npf_table_t *
npf_table_get(npf_tableset_t *tset, u_int tid)
{
	npf_tableset_t *rtset;
	npf_table_t *t;

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return NULL;
	}
	if (tset == NULL) {
		npf_core_enter();
		rtset = npf_core_tableset();
	} else {
		rtset = tset;
	}
	t = rtset[tid];
	if (t != NULL) {
		/*
		 * Acquire the table lock while still holding the core
		 * lock - presumably so the table cannot go away in
		 * between (NOTE(review): confirm against npf_core_*).
		 */
		rw_enter(&t->t_lock, RW_READER);
	}
	if (tset == NULL) {
		npf_core_exit();
	}
	return t;
}
328 1.1 rmind
/*
 * npf_table_put: "put table back" by unlocking it.
 */
void
npf_table_put(npf_table_t *t)
{

	rw_exit(&t->t_lock);
}
338 1.1 rmind
339 1.1 rmind /*
340 1.1 rmind * npf_table_check: validate ID and type.
341 1.1 rmind * */
342 1.1 rmind int
343 1.1 rmind npf_table_check(npf_tableset_t *tset, u_int tid, int type)
344 1.1 rmind {
345 1.1 rmind
346 1.1 rmind if ((u_int)tid >= NPF_TABLE_SLOTS) {
347 1.1 rmind return EINVAL;
348 1.1 rmind }
349 1.1 rmind if (tset[tid] != NULL) {
350 1.1 rmind return EEXIST;
351 1.1 rmind }
352 1.1 rmind if (type != NPF_TABLE_RBTREE && type != NPF_TABLE_HASH) {
353 1.1 rmind return EINVAL;
354 1.1 rmind }
355 1.1 rmind return 0;
356 1.1 rmind }
357 1.1 rmind
/*
 * npf_table_add_v4cidr: add an IPv4 CIDR into the table.
 *
 * => Returns 0 on success, EINVAL for a bad table ID, or EEXIST if an
 *    identical entry is already present.
 */
int
npf_table_add_v4cidr(npf_tableset_t *tset, u_int tid,
    in_addr_t addr, in_addr_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *e, *it;
	npf_table_t *t;
	in_addr_t val;
	int error = 0;

	/*
	 * Allocate and setup the entry before taking the table lock,
	 * so the (possibly sleeping) allocation happens unlocked.
	 */
	e = pool_cache_get(tblent_cache, PR_WAITOK);
	e->te_addr = addr;
	e->te_mask = mask;

	/* Locks the table. */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		pool_cache_put(tblent_cache, e);
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: address & mask. */
		val = addr & mask;
		htbl = table_hash_bucket(t, &val, sizeof(in_addr_t));
		/* Lookup to check for duplicates. */
		LIST_FOREACH(it, htbl, te_entry.hashq) {
			if (it->te_addr == addr && it->te_mask == mask)
				break;
		}
		/* If no duplicate - insert entry (it is NULL when the
		 * bucket scan found no match). */
		if (__predict_true(it == NULL)) {
			LIST_INSERT_HEAD(htbl, e, te_entry.hashq);
		} else {
			error = EEXIST;
		}
		break;
	case NPF_TABLE_RBTREE:
		/* Insert entry.  rb_tree_insert_node() returns the already
		 * present node (not e) on duplicate key. */
		if (rb_tree_insert_node(&t->t_rbtree, e) != e) {
			error = EEXIST;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	/* On failure, return the unused entry to the pool. */
	if (__predict_false(error)) {
		pool_cache_put(tblent_cache, e);
	}
	return error;
}
415 1.1 rmind
416 1.1 rmind /*
417 1.1 rmind * npf_table_rem_v4cidr: remove an IPv4 CIDR from the table.
418 1.1 rmind */
419 1.1 rmind int
420 1.1 rmind npf_table_rem_v4cidr(npf_tableset_t *tset, u_int tid,
421 1.1 rmind in_addr_t addr, in_addr_t mask)
422 1.1 rmind {
423 1.1 rmind struct npf_hashl *htbl;
424 1.1 rmind npf_tblent_t *e;
425 1.1 rmind npf_table_t *t;
426 1.1 rmind in_addr_t val;
427 1.1 rmind int error;
428 1.1 rmind
429 1.1 rmind e = NULL;
430 1.1 rmind
431 1.1 rmind /* Locks the table. */
432 1.1 rmind t = npf_table_get(tset, tid);
433 1.1 rmind if (__predict_false(t == NULL)) {
434 1.1 rmind return EINVAL;
435 1.1 rmind }
436 1.1 rmind /* Lookup & remove. */
437 1.1 rmind switch (t->t_type) {
438 1.1 rmind case NPF_TABLE_HASH:
439 1.1 rmind /* Generate hash value from: (address & mask). */
440 1.1 rmind val = addr & mask;
441 1.1 rmind htbl = table_hash_bucket(t, &val, sizeof(in_addr_t));
442 1.1 rmind LIST_FOREACH(e, htbl, te_entry.hashq) {
443 1.1 rmind if (e->te_addr == addr && e->te_mask == mask)
444 1.1 rmind break;
445 1.1 rmind }
446 1.1 rmind if (__predict_true(e != NULL)) {
447 1.1 rmind LIST_REMOVE(e, te_entry.hashq);
448 1.1 rmind } else {
449 1.1 rmind error = ESRCH;
450 1.1 rmind }
451 1.1 rmind break;
452 1.1 rmind case NPF_TABLE_RBTREE:
453 1.1 rmind /* Key: (address & mask). */
454 1.1 rmind val = addr & mask;
455 1.2 rmind e = rb_tree_find_node(&t->t_rbtree, &val);
456 1.2 rmind if (__predict_true(e != NULL)) {
457 1.2 rmind rb_tree_remove_node(&t->t_rbtree, e);
458 1.1 rmind } else {
459 1.1 rmind error = ESRCH;
460 1.1 rmind }
461 1.1 rmind break;
462 1.1 rmind default:
463 1.1 rmind KASSERT(false);
464 1.1 rmind }
465 1.1 rmind npf_table_put(t);
466 1.1 rmind
467 1.1 rmind /* Free table the entry. */
468 1.1 rmind if (__predict_true(e != NULL)) {
469 1.1 rmind pool_cache_put(tblent_cache, e);
470 1.1 rmind }
471 1.1 rmind return e ? 0 : -1;
472 1.1 rmind }
473 1.1 rmind
474 1.1 rmind /*
475 1.1 rmind * npf_table_match_v4addr: find the table according to ID, lookup and
476 1.1 rmind * match the contents with specified IPv4 address.
477 1.1 rmind */
478 1.1 rmind int
479 1.1 rmind npf_table_match_v4addr(u_int tid, in_addr_t ip4addr)
480 1.1 rmind {
481 1.1 rmind struct npf_hashl *htbl;
482 1.1 rmind npf_tblent_t *e;
483 1.1 rmind npf_table_t *t;
484 1.1 rmind
485 1.1 rmind e = NULL;
486 1.1 rmind
487 1.1 rmind /* Locks the table. */
488 1.1 rmind t = npf_table_get(NULL, tid);
489 1.1 rmind if (__predict_false(t == NULL)) {
490 1.1 rmind return EINVAL;
491 1.1 rmind }
492 1.1 rmind switch (t->t_type) {
493 1.1 rmind case NPF_TABLE_HASH:
494 1.1 rmind htbl = table_hash_bucket(t, &ip4addr, sizeof(in_addr_t));
495 1.1 rmind LIST_FOREACH(e, htbl, te_entry.hashq) {
496 1.1 rmind if ((ip4addr & e->te_mask) == e->te_addr) {
497 1.1 rmind break;
498 1.1 rmind }
499 1.1 rmind }
500 1.1 rmind break;
501 1.1 rmind case NPF_TABLE_RBTREE:
502 1.2 rmind e = rb_tree_find_node(&t->t_rbtree, &ip4addr);
503 1.1 rmind KASSERT((ip4addr & e->te_mask) == e->te_addr);
504 1.1 rmind break;
505 1.1 rmind default:
506 1.1 rmind KASSERT(false);
507 1.1 rmind }
508 1.1 rmind npf_table_put(t);
509 1.1 rmind
510 1.1 rmind return e ? 0 : -1;
511 1.1 rmind }
512