/*	$NetBSD: npf_tableset.c,v 1.5.6.1 2011/11/10 14:31:51 yamt Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF tableset module.
 *
 * TODO:
 * - Currently, code is modeled to handle IPv4 CIDR blocks.
 * - Dynamic hash growing/shrinking (i.e. re-hash functionality), maybe?
 * - Dynamic array resize.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.5.6.1 2011/11/10 14:31:51 yamt Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "npf_impl.h"

/* Table entry structure. */
struct npf_tblent {
	/* Hash/tree entry. */
	union {
		LIST_ENTRY(npf_tblent)	hashq;
		rb_node_t		rbnode;
	} te_entry;
	/* CIDR block: address and netmask. */
	npf_addr_t			te_addr;
	npf_netmask_t			te_mask;
};

LIST_HEAD(npf_hashl, npf_tblent);

/* Table structure. */
struct npf_table {
	char				t_name[16];
	/* Lock and reference count. */
	krwlock_t			t_lock;
	u_int				t_refcnt;
	/* Table ID. */
	u_int				t_id;
	/* The storage type can be: 1. Hash 2. RB-tree. */
	int				t_type;
	struct npf_hashl *		t_hashl;
	u_long				t_hashmask;
	rb_tree_t			t_rbtree;
};

static pool_cache_t			tblent_cache	__read_mostly;

/*
 * npf_tableset_sysinit: initialise tableset structures.
 */
void
npf_tableset_sysinit(void)
{

	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftenpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

void
npf_tableset_sysfini(void)
{

	pool_cache_destroy(tblent_cache);
}

npf_tableset_t *
npf_tableset_create(void)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);

	return kmem_zalloc(sz, KM_SLEEP);
}

void
npf_tableset_destroy(npf_tableset_t *tblset)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);
	npf_table_t *t;
	u_int tid;

	/*
	 * Destroy all tables (no references should be held, as the
	 * ruleset should have been destroyed first).
	 */
	for (tid = 0; tid < NPF_TABLE_SLOTS; tid++) {
		t = tblset[tid];
		if (t != NULL) {
			npf_table_destroy(t);
		}
	}
	kmem_free(tblset, sz);
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success; EEXIST if the slot with that ID is already used.
 */
int
npf_tableset_insert(npf_tableset_t *tblset, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	if (tblset[tid] == NULL) {
		tblset[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}

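/*
 * Usage sketch (illustrative only): building a tableset and inserting a
 * freshly created table into it.  The hash type and the 1024-bucket hint
 * are arbitrary choices for the example; the table ID must be below
 * NPF_TABLE_SLOTS and is also the slot used by npf_tableset_insert().
 *
 *	npf_tableset_t *tset = npf_tableset_create();
 *	npf_table_t *t = npf_table_create(1, NPF_TABLE_HASH, 1024);
 *	int error = (t != NULL) ? npf_tableset_insert(tset, t) : ENOMEM;
 *	...
 *	npf_tableset_destroy(tset);
 */
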
/*
 * Red-black tree storage.
 */

static signed int
table_rbtree_cmp_nodes(void *ctx, const void *n1, const void *n2)
{
	const npf_tblent_t * const te1 = n1;
	const npf_tblent_t * const te2 = n2;

	return npf_compare_cidr(&te1->te_addr, te1->te_mask,
	    &te2->te_addr, te2->te_mask);
}

static signed int
table_rbtree_cmp_key(void *ctx, const void *n1, const void *key)
{
	const npf_tblent_t * const te = n1;
	const npf_addr_t *t2 = key;

	return npf_compare_cidr(&te->te_addr, te->te_mask, t2, NPF_NO_NETMASK);
}

static const rb_tree_ops_t table_rbtree_ops = {
	.rbto_compare_nodes = table_rbtree_cmp_nodes,
	.rbto_compare_key = table_rbtree_cmp_key,
	.rbto_node_offset = offsetof(npf_tblent_t, te_entry.rbnode),
	.rbto_context = NULL
};

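/*
 * Note: rbto_compare_key compares a tree entry against a plain npf_addr_t
 * (no mask, hence NPF_NO_NETMASK), which is what npf_table_rem_cidr() and
 * npf_table_match_addr() pass as the lookup key further below.
 */
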
/*
 * Hash helper routine.
 */

static inline struct npf_hashl *
table_hash_bucket(npf_table_t *t, const void *buf, size_t sz)
{
	const uint32_t hidx = hash32_buf(buf, sz, HASH32_BUF_INIT);

	return &t->t_hashl[hidx & t->t_hashmask];
}

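/*
 * Note: the bucket is selected by hashing the caller-supplied buffer and
 * masking the 32-bit result with t_hashmask; hashinit() rounds the bucket
 * count up to a power of two, so the mask always yields a valid index.
 */
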
/*
 * npf_table_create: create table with a specified ID.
 */
npf_table_t *
npf_table_create(u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	switch (type) {
	case NPF_TABLE_RBTREE:
		rb_tree_init(&t->t_rbtree, &table_rbtree_ops);
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_refcnt = 1;
	t->t_id = tid;
	return t;
}

/*
 * npf_table_destroy: free all table entries and table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	npf_tblent_t *e;
	u_int n;

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		for (n = 0; n <= t->t_hashmask; n++) {
			while ((e = LIST_FIRST(&t->t_hashl[n])) != NULL) {
				LIST_REMOVE(e, te_entry.hashq);
				pool_cache_put(tblent_cache, e);
			}
		}
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_RBTREE:
		while ((e = rb_tree_iterate(&t->t_rbtree, NULL,
		    RB_DIR_LEFT)) != NULL) {
			rb_tree_remove_node(&t->t_rbtree, e);
			pool_cache_put(tblent_cache, e);
		}
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

/*
 * npf_table_ref: acquire a reference on the table.
 *
 * => Table must be locked.
 */
void
npf_table_ref(npf_table_t *t)
{

	KASSERT(rw_lock_held(&t->t_lock));
	atomic_inc_uint(&t->t_refcnt);
}

/*
 * npf_table_unref: drop a reference from the table and destroy the table
 * if it was the last reference.
 */
void
npf_table_unref(npf_table_t *t)
{

	if (atomic_dec_uint_nv(&t->t_refcnt) != 0) {
		return;
	}
	npf_table_destroy(t);
}

/*
 * npf_table_get: find the table according to ID and "get it" by locking it.
 */
npf_table_t *
npf_table_get(npf_tableset_t *tset, u_int tid)
{
	npf_tableset_t *rtset;
	npf_table_t *t;

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return NULL;
	}
	rtset = tset ? tset : npf_core_tableset();
	t = rtset[tid];
	if (t != NULL) {
		rw_enter(&t->t_lock, RW_READER);
	}
	return t;
}

/*
 * npf_table_put: "put table back" by unlocking it.
 */
void
npf_table_put(npf_table_t *t)
{

	rw_exit(&t->t_lock);
}

/*
 * npf_table_check: validate ID and type.
 */
int
npf_table_check(npf_tableset_t *tset, u_int tid, int type)
{

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return EINVAL;
	}
	if (tset[tid] != NULL) {
		return EEXIST;
	}
	if (type != NPF_TABLE_RBTREE && type != NPF_TABLE_HASH) {
		return EINVAL;
	}
	return 0;
}

/*
 * npf_table_add_cidr: add an IPv4 or IPv6 CIDR into the table.
 */
int
npf_table_add_cidr(npf_tableset_t *tset, u_int tid,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *e, *it;
	npf_table_t *t;
	npf_addr_t val;
	int error = 0;

	/* Allocate and setup entry. */
	e = pool_cache_get(tblent_cache, PR_WAITOK);
	memcpy(&e->te_addr, addr, sizeof(npf_addr_t));
	e->te_mask = mask;

	/* Locks the table. */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		pool_cache_put(tblent_cache, e);
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: address & mask. */
		npf_calculate_masked_addr(&val, addr, mask);
		htbl = table_hash_bucket(t, &val, sizeof(npf_addr_t));
		/* Lookup to check for duplicates. */
		LIST_FOREACH(it, htbl, te_entry.hashq) {
			if (it->te_mask != mask) {
				continue;
			}
			if (!memcmp(&it->te_addr, addr, sizeof(npf_addr_t))) {
				break;
			}
		}
		/* If no duplicate - insert entry. */
		if (__predict_true(it == NULL)) {
			LIST_INSERT_HEAD(htbl, e, te_entry.hashq);
		} else {
			error = EEXIST;
		}
		break;
	case NPF_TABLE_RBTREE:
		/*
		 * Insert the entry; rb_tree_insert_node() returns the
		 * already existing node if there is a duplicate.
		 */
		if (rb_tree_insert_node(&t->t_rbtree, e) != e) {
			error = EEXIST;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	if (__predict_false(error)) {
		pool_cache_put(tblent_cache, e);
	}
	return error;
}

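/*
 * Usage sketch (illustrative only): inserting 10.0.0.0/24 into table 1 of
 * the tableset "tset".  It is assumed here that npf_addr_t is a 128-bit
 * container holding an IPv4 address in its leading four octets; the mask
 * argument is the CIDR prefix length.
 *
 *	npf_addr_t addr;
 *	const uint32_t net = htonl(0x0a000000);
 *	memset(&addr, 0, sizeof(addr));
 *	memcpy(&addr, &net, sizeof(net));
 *	int error = npf_table_add_cidr(tset, 1, &addr, 24);
 */
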
/*
 * npf_table_rem_cidr: remove an IPv4 or IPv6 CIDR from the table.
 */
int
npf_table_rem_cidr(npf_tableset_t *tset, u_int tid,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *e;
	npf_table_t *t;
	npf_addr_t val;
	int error = 0;

	e = NULL;

	/* Locks the table. */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}
	/* Lookup & remove. */
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: (address & mask). */
		npf_calculate_masked_addr(&val, addr, mask);
		htbl = table_hash_bucket(t, &val, sizeof(npf_addr_t));
		LIST_FOREACH(e, htbl, te_entry.hashq) {
			if (e->te_mask != mask) {
				continue;
			}
			if (!memcmp(&e->te_addr, addr, sizeof(npf_addr_t))) {
				break;
			}
		}
		if (__predict_true(e != NULL)) {
			LIST_REMOVE(e, te_entry.hashq);
		} else {
			error = ESRCH;
		}
		break;
	case NPF_TABLE_RBTREE:
		/* Key: (address & mask). */
		npf_calculate_masked_addr(&val, addr, mask);
		e = rb_tree_find_node(&t->t_rbtree, &val);
		if (__predict_true(e != NULL)) {
			rb_tree_remove_node(&t->t_rbtree, e);
		} else {
			error = ESRCH;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	/* Free the table entry, if it was removed. */
	if (__predict_true(e != NULL)) {
		pool_cache_put(tblent_cache, e);
	}
	return error;
}

/*
 * npf_table_match_addr: find the table according to ID, lookup and
 * match the contents with the specified address.
 */
int
npf_table_match_addr(u_int tid, const npf_addr_t *addr)
{
	struct npf_hashl *htbl;
	npf_tblent_t *e = NULL;
	npf_table_t *t;

	/* Locks the table. */
	t = npf_table_get(NULL, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		htbl = table_hash_bucket(t, addr, sizeof(npf_addr_t));
		LIST_FOREACH(e, htbl, te_entry.hashq) {
			if (npf_compare_cidr(addr, e->te_mask, &e->te_addr,
			    NPF_NO_NETMASK) == 0)
				break;
		}
		break;
	case NPF_TABLE_RBTREE:
		e = rb_tree_find_node(&t->t_rbtree, addr);
		KASSERT(e == NULL || npf_compare_cidr(addr, e->te_mask,
		    &e->te_addr, NPF_NO_NETMASK) == 0);
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	return e ? 0 : -1;
}

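/*
 * Usage sketch (illustrative only): matching an address against table 1 of
 * the currently active (core) tableset, as a packet inspection step might.
 * "addr" is assumed to be an npf_addr_t already extracted from the packet.
 *
 *	if (npf_table_match_addr(1, &addr) == 0) {
 *		... the address is covered by the table ...
 *	}
 */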