/*	$NetBSD: npf_tableset.c,v 1.1 2010/08/22 18:56:23 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF table module.
 *
 * Lock order:
 *
 *	table_lock ->
 *		npf_table_t::t_lock
 *
 * TODO:
 * - Currently, the code is modeled to handle IPv4 CIDR blocks only.
 * - Dynamic hash growing/shrinking (i.e. re-hash functionality), maybe?
 * - Dynamic array resize.
 */
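
/*
 * Illustrative sketch (comment only, not compiled): the lock order
 * documented above as it is followed by the lookup path in this module.
 * The global table_lock is taken first, then the per-table t_lock, and
 * table_lock is dropped once the table itself is held:
 *
 *	rw_enter(&table_lock, RW_READER);
 *	t = table_array[tid];
 *	if (t != NULL)
 *		rw_enter(&t->t_lock, RW_READER);
 *	rw_exit(&table_lock);
 *	...
 *	rw_exit(&t->t_lock);
 */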

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.1 2010/08/22 18:56:23 rmind Exp $");
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "npf_impl.h"

/* Table entry structure. */
struct npf_tblent {
	/* IPv4 CIDR block. */
	in_addr_t			te_addr;
	in_addr_t			te_mask;
	union {
		LIST_ENTRY(npf_tblent)	hashq;
		struct rb_node		rbnode;
	} te_entry;
};

/* Return pointer to npf_tblent_t from RB-tree node. (XXX fix rb-tree) */
#define	NPF_RBN2TBLENT(n)		\
    (npf_tblent_t *)((uintptr_t)n - offsetof(npf_tblent_t, te_entry.rbnode))
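
/*
 * Illustrative sketch (comment only, not compiled): NPF_RBN2TBLENT()
 * performs a "container of" conversion, recovering the entry from its
 * embedded rb_node by subtracting the member offset.  Callers should
 * check the node for NULL before converting:
 *
 *	struct rb_node *nd = rb_tree_find_node(&t->t_rbtree, &key);
 *	npf_tblent_t *e = (nd != NULL) ? NPF_RBN2TBLENT(nd) : NULL;
 */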

LIST_HEAD(npf_hashl, npf_tblent);

/* Table structure. */
struct npf_table {
	char				t_name[16];
	/* Lock and reference count. */
	krwlock_t			t_lock;
	u_int				t_refcnt;
	/* Table ID. */
	u_int				t_id;
	/* The storage type can be: 1. Hash 2. RB-tree. */
	u_int				t_type;
	struct npf_hashl *		t_hashl;
	u_long				t_hashmask;
	struct rb_tree			t_rbtree;
};

/* Global table array and its lock. */
static npf_tableset_t *		table_array;
static krwlock_t		table_lock;
static pool_cache_t		tblent_cache;

/*
 * npf_tableset_sysinit: initialise tableset structures.
 */
int
npf_tableset_sysinit(void)
{

	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftenpl", NULL, IPL_NONE, NULL, NULL, NULL);
	if (tblent_cache == NULL) {
		return ENOMEM;
	}
	table_array = npf_tableset_create();
	if (table_array == NULL) {
		pool_cache_destroy(tblent_cache);
		return ENOMEM;
	}
	rw_init(&table_lock);
	return 0;
}

void
npf_tableset_sysfini(void)
{

	npf_tableset_destroy(table_array);
	pool_cache_destroy(tblent_cache);
	rw_destroy(&table_lock);
}

npf_tableset_t *
npf_tableset_create(void)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);

	return kmem_zalloc(sz, KM_SLEEP);
}

void
npf_tableset_destroy(npf_tableset_t *tblset)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);
	npf_table_t *t;
	u_int tid;

	/*
	 * Destroy all tables (no references should be held, as the
	 * ruleset should be destroyed first).
	 */
	for (tid = 0; tid < NPF_TABLE_SLOTS; tid++) {
		t = tblset[tid];
		if (t != NULL) {
			npf_table_destroy(t);
		}
	}
	kmem_free(tblset, sz);
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success; EEXIST if the table ID is already in use.
 */
int
npf_tableset_insert(npf_tableset_t *tblset, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	if (tblset[tid] == NULL) {
		tblset[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}
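
/*
 * Illustrative sketch (comment only, not compiled), assuming a hash
 * table and a caller-chosen ID: a table is created and placed into a
 * tableset under construction; on a duplicate ID the new table is
 * released again.  Error handling is abbreviated.
 *
 *	npf_tableset_t *tset = npf_tableset_create();
 *	npf_table_t *t = npf_table_create(tid, NPF_TABLE_HASH, 1024);
 *	if (npf_tableset_insert(tset, t) != 0)
 *		npf_table_destroy(t);
 */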

/*
 * npf_tableset_reload: replace old tableset array with a new one.
 *
 * => Called from npf_ruleset_reload() with a global ruleset lock held.
 * => Returns pointer to the old tableset, caller will destroy it.
 */
npf_tableset_t *
npf_tableset_reload(npf_tableset_t *tblset)
{
	npf_tableset_t *oldtblset;

	rw_enter(&table_lock, RW_WRITER);
	oldtblset = table_array;
	table_array = tblset;
	rw_exit(&table_lock);

	return oldtblset;
}
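
/*
 * Illustrative sketch (comment only, not compiled): the reload sequence
 * expected from the caller, which builds a new tableset, swaps it in
 * under the ruleset lock and then destroys the old one:
 *
 *	newtset = npf_tableset_create();
 *	... populate newtset via npf_tableset_insert() ...
 *	oldtset = npf_tableset_reload(newtset);
 *	npf_tableset_destroy(oldtset);
 */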

/*
 * Red-black tree storage.
 */

static signed int
table_rbtree_cmp_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const npf_tblent_t *te1 = NPF_RBN2TBLENT(n1);
	const npf_tblent_t *te2 = NPF_RBN2TBLENT(n2);
	const in_addr_t x = te1->te_addr & te1->te_mask;
	const in_addr_t y = te2->te_addr & te2->te_mask;

	if (x < y)
		return 1;
	if (x > y)
		return -1;
	return 0;
}

static signed int
table_rbtree_cmp_key(const struct rb_node *n1, const void *key)
{
	const npf_tblent_t *te = NPF_RBN2TBLENT(n1);
	const in_addr_t x = te->te_addr & te->te_mask;
	const in_addr_t y = *(const in_addr_t *)key;

	if (x < y)
		return 1;
	if (x > y)
		return -1;
	return 0;
}

static const struct rb_tree_ops table_rbtree_ops = {
	.rbto_compare_nodes = table_rbtree_cmp_nodes,
	.rbto_compare_key = table_rbtree_cmp_key
};
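
/*
 * Illustrative sketch (comment only, not compiled): both comparators
 * order entries by the masked address, so a removal looks up the node
 * with (address & mask) as the key, while the insert path relies on
 * rb_tree_insert_node() rejecting duplicates:
 *
 *	in_addr_t key = addr & mask;
 *	nd = rb_tree_find_node(&t->t_rbtree, &key);
 *
 *	if (!rb_tree_insert_node(&t->t_rbtree, &e->te_entry.rbnode))
 *		error = EEXIST;
 */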

/*
 * Hash helper routine.
 */

static inline struct npf_hashl *
table_hash_bucket(npf_table_t *t, void *buf, size_t sz)
{
	const uint32_t hidx = hash32_buf(buf, sz, HASH32_BUF_INIT);

	return &t->t_hashl[hidx & t->t_hashmask];
}
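
/*
 * Illustrative sketch (comment only, not compiled): the add/remove paths
 * hash the masked address, so the bucket is computed from the value
 * (address & mask) rather than from the raw address:
 *
 *	in_addr_t val = addr & mask;
 *	struct npf_hashl *htbl = table_hash_bucket(t, &val, sizeof(in_addr_t));
 *	LIST_FOREACH(e, htbl, te_entry.hashq)
 *		...
 */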

/*
 * npf_table_create: create table with a specified ID.
 */
npf_table_t *
npf_table_create(u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	switch (type) {
	case NPF_TABLE_RBTREE:
		rb_tree_init(&t->t_rbtree, &table_rbtree_ops);
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_refcnt = 1;
	t->t_id = tid;
	return t;
}

/*
 * npf_table_destroy: free all table entries and the table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	npf_tblent_t *e;
	struct rb_node *nd;
	u_int n;

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		for (n = 0; n <= t->t_hashmask; n++) {
			while ((e = LIST_FIRST(&t->t_hashl[n])) != NULL) {
				LIST_REMOVE(e, te_entry.hashq);
				pool_cache_put(tblent_cache, e);
			}
		}
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_RBTREE:
		while ((nd = rb_tree_iterate(&t->t_rbtree, NULL,
		    RB_DIR_RIGHT)) != NULL) {
			e = NPF_RBN2TBLENT(nd);
			rb_tree_remove_node(&t->t_rbtree, &e->te_entry.rbnode);
			pool_cache_put(tblent_cache, e);
		}
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

/*
 * npf_table_ref: acquire a reference on the table.
 *
 * => Table must be locked.
 */
void
npf_table_ref(npf_table_t *t)
{

	KASSERT(rw_lock_held(&t->t_lock));
	atomic_inc_uint(&t->t_refcnt);
}

/*
 * npf_table_unref: drop a reference from the table and destroy the table
 * if it was the last one.
 */
void
npf_table_unref(npf_table_t *t)
{

	if (atomic_dec_uint_nv(&t->t_refcnt) != 0) {
		return;
	}
	npf_table_destroy(t);
}
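
/*
 * Illustrative sketch (comment only, not compiled): a caller that keeps
 * a table pointer beyond the locked section takes an extra reference
 * while the table is still locked and drops it once done:
 *
 *	t = npf_table_get(tset, tid);
 *	npf_table_ref(t);
 *	npf_table_put(t);
 *	...
 *	npf_table_unref(t);
 */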

/*
 * npf_table_get: find the table according to ID and "get it" by locking it.
 */
npf_table_t *
npf_table_get(npf_tableset_t *tset, u_int tid)
{
	npf_table_t *t;

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return NULL;
	}
	if (tset) {
		t = tset[tid];
		if (t != NULL) {
			rw_enter(&t->t_lock, RW_READER);
		}
		return t;
	}
	rw_enter(&table_lock, RW_READER);
	t = table_array[tid];
	if (t != NULL) {
		rw_enter(&t->t_lock, RW_READER);
	}
	rw_exit(&table_lock);
	return t;
}

/*
 * npf_table_put: "put table back" by unlocking it.
 */
void
npf_table_put(npf_table_t *t)
{

	rw_exit(&t->t_lock);
}
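
/*
 * Illustrative sketch (comment only, not compiled): access to a table is
 * bracketed by the get/put pair; npf_table_get() returns the table
 * locked and npf_table_put() releases that lock:
 *
 *	t = npf_table_get(tset, tid);
 *	if (__predict_false(t == NULL))
 *		return EINVAL;
 *	... use the table ...
 *	npf_table_put(t);
 */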

/*
 * npf_table_check: validate ID and type.
 */
int
npf_table_check(npf_tableset_t *tset, u_int tid, int type)
{

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return EINVAL;
	}
	if (tset[tid] != NULL) {
		return EEXIST;
	}
	if (type != NPF_TABLE_RBTREE && type != NPF_TABLE_HASH) {
		return EINVAL;
	}
	return 0;
}

/*
 * npf_table_add_v4cidr: add an IPv4 CIDR block into the table.
 */
int
npf_table_add_v4cidr(npf_tableset_t *tset, u_int tid,
    in_addr_t addr, in_addr_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *e, *it;
	npf_table_t *t;
	in_addr_t val;
	int error = 0;

	/* Allocate and set up the entry. */
	e = pool_cache_get(tblent_cache, PR_WAITOK);
	if (e == NULL) {
		return ENOMEM;
	}
	e->te_addr = addr;
	e->te_mask = mask;

	/* Lock the table. */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		pool_cache_put(tblent_cache, e);
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: (address & mask). */
		val = addr & mask;
		htbl = table_hash_bucket(t, &val, sizeof(in_addr_t));
		/* Lookup to check for duplicates. */
		LIST_FOREACH(it, htbl, te_entry.hashq) {
			if (it->te_addr == addr && it->te_mask == mask)
				break;
		}
		/* If there is no duplicate - insert the entry. */
		if (__predict_true(it == NULL)) {
			LIST_INSERT_HEAD(htbl, e, te_entry.hashq);
		} else {
			error = EEXIST;
		}
		break;
	case NPF_TABLE_RBTREE:
		/* Insert the entry; returns false on duplicate. */
		if (!rb_tree_insert_node(&t->t_rbtree, &e->te_entry.rbnode)) {
			error = EEXIST;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	if (__predict_false(error)) {
		pool_cache_put(tblent_cache, e);
	}
	return error;
}
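
/*
 * Illustrative sketch (comment only, not compiled), assuming addresses
 * are passed in network byte order: adding the 10.1.1.0/24 block to the
 * table identified by 'tid'.
 *
 *	error = npf_table_add_v4cidr(tset, tid,
 *	    htonl(0x0a010100), htonl(0xffffff00));
 *	if (error == EEXIST)
 *		... the block is already in the table ...
 */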

/*
 * npf_table_rem_v4cidr: remove an IPv4 CIDR block from the table.
 */
int
npf_table_rem_v4cidr(npf_tableset_t *tset, u_int tid,
    in_addr_t addr, in_addr_t mask)
{
	struct npf_hashl *htbl;
	struct rb_node *nd;
	npf_tblent_t *e;
	npf_table_t *t;
	in_addr_t val;
	int error = 0;

	e = NULL;

	/* Lock the table. */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}
	/* Lookup & remove. */
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: (address & mask). */
		val = addr & mask;
		htbl = table_hash_bucket(t, &val, sizeof(in_addr_t));
		LIST_FOREACH(e, htbl, te_entry.hashq) {
			if (e->te_addr == addr && e->te_mask == mask)
				break;
		}
		if (__predict_true(e != NULL)) {
			LIST_REMOVE(e, te_entry.hashq);
		} else {
			error = ESRCH;
		}
		break;
	case NPF_TABLE_RBTREE:
		/* Key: (address & mask). */
		val = addr & mask;
		nd = rb_tree_find_node(&t->t_rbtree, &val);
		if (__predict_true(nd != NULL)) {
			e = NPF_RBN2TBLENT(nd);
			rb_tree_remove_node(&t->t_rbtree, &e->te_entry.rbnode);
		} else {
			error = ESRCH;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	/* Free the table entry, if it was found and removed. */
	if (__predict_true(e != NULL)) {
		pool_cache_put(tblent_cache, e);
	}
	return error;
}

/*
 * npf_table_match_v4addr: find the table according to ID, lookup and
 * match the contents with the specified IPv4 address.
 */
int
npf_table_match_v4addr(u_int tid, in_addr_t ip4addr)
{
	struct npf_hashl *htbl;
	struct rb_node *nd;
	npf_tblent_t *e;
	npf_table_t *t;

	e = NULL;

	/* Lock the table. */
	t = npf_table_get(NULL, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		htbl = table_hash_bucket(t, &ip4addr, sizeof(in_addr_t));
		LIST_FOREACH(e, htbl, te_entry.hashq) {
			if ((ip4addr & e->te_mask) == e->te_addr) {
				break;
			}
		}
		break;
	case NPF_TABLE_RBTREE:
		nd = rb_tree_find_node(&t->t_rbtree, &ip4addr);
		if (nd != NULL) {
			e = NPF_RBN2TBLENT(nd);
			KASSERT((ip4addr & e->te_mask) == e->te_addr);
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	return e ? 0 : -1;
}
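
/*
 * Illustrative sketch (comment only, not compiled): a hypothetical
 * inspection path testing whether the IPv4 source address of a packet
 * is covered by table 'tid'; a return value of zero means a match.
 *
 *	const struct ip *ip = ...;
 *
 *	if (npf_table_match_v4addr(tid, ip->ip_src.s_addr) == 0) {
 *		... the address is listed in the table ...
 *	}
 */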