/*	$NetBSD: npf_tableset.c,v 1.12 2012/07/01 23:21:06 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF tableset module.
 *
 * TODO:
 * - Convert to Patricia tree.
 * - Dynamic hash growing/shrinking (i.e. re-hash functionality), maybe?
 * - Dynamic array resize.
 */

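/*
 * Data model overview: a tableset is a fixed-size array of NPF_TABLE_SLOTS
 * table pointers, indexed by table ID.  Each table stores CIDR entries
 * (npf_tblent_t) either in a hash table of lists or in a red-black tree,
 * depending on its storage type.  Tables are reference-counted and are
 * protected by a per-table reader/writer lock.
 */
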
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_tableset.c,v 1.12 2012/07/01 23:21:06 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/hash.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/systm.h>

#include "npf_impl.h"

/* Table entry structure. */
struct npf_tblent {
	/* Hash/tree entry. */
	union {
		LIST_ENTRY(npf_tblent)	hashq;
		rb_node_t		rbnode;
	} te_entry;
	/* CIDR block. */
	npf_addr_t			te_addr;
	npf_netmask_t			te_mask;
};

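/* Hash bucket: list of table entries. */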
LIST_HEAD(npf_hashl, npf_tblent);

/* Table structure. */
struct npf_table {
	char				t_name[16];
	/* Lock and reference count. */
	krwlock_t			t_lock;
	u_int				t_refcnt;
	/* Table ID. */
	u_int				t_id;
	/* Storage type: hash table or red-black tree. */
	int				t_type;
	struct npf_hashl *		t_hashl;
	u_long				t_hashmask;
	rb_tree_t			t_rbtree;
};

static pool_cache_t			tblent_cache	__read_mostly;

/*
 * npf_tableset_sysinit: initialise tableset structures.
 */
void
npf_tableset_sysinit(void)
{

	tblent_cache = pool_cache_init(sizeof(npf_tblent_t), coherency_unit,
	    0, 0, "npftenpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

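/*
 * npf_tableset_sysfini: destroy tableset structures.
 */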
void
npf_tableset_sysfini(void)
{

	pool_cache_destroy(tblent_cache);
}

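/*
 * npf_tableset_create: allocate and return a new, empty tableset.
 */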
npf_tableset_t *
npf_tableset_create(void)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);

	return kmem_zalloc(sz, KM_SLEEP);
}

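/*
 * npf_tableset_destroy: destroy all tables in the tableset and free it.
 */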
void
npf_tableset_destroy(npf_tableset_t *tblset)
{
	const size_t sz = NPF_TABLE_SLOTS * sizeof(npf_table_t *);
	npf_table_t *t;
	u_int tid;

	/*
	 * Destroy all tables (no references should be held, as the
	 * ruleset should have been destroyed first).
	 */
	for (tid = 0; tid < NPF_TABLE_SLOTS; tid++) {
		t = tblset[tid];
		if (t != NULL) {
			npf_table_destroy(t);
		}
	}
	kmem_free(tblset, sz);
}

/*
 * npf_tableset_insert: insert the table into the specified tableset.
 *
 * => Returns 0 on success or EEXIST if the ID is already used.
 */
int
npf_tableset_insert(npf_tableset_t *tblset, npf_table_t *t)
{
	const u_int tid = t->t_id;
	int error;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	if (tblset[tid] == NULL) {
		tblset[tid] = t;
		error = 0;
	} else {
		error = EEXIST;
	}
	return error;
}

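/*
 * Illustrative usage sketch: a minimal sequence showing how the tableset
 * and table routines in this file fit together.  It assumes a hypothetical
 * npf_addr_t 'addr' that is already filled in; error handling is omitted
 * and the real construction path lives in the NPF control code.
 *
 *	npf_tableset_t *ts = npf_tableset_create();
 *	npf_table_t *t = npf_table_create(0, NPF_TABLE_HASH, 1024);
 *	npf_tableset_insert(ts, t);
 *	npf_table_add_cidr(ts, 0, &addr, 24);
 *	... npf_table_match_addr(ts, 0, &addr) now returns 0 ...
 *	npf_tableset_destroy(ts);
 */
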
/*
 * Red-black tree storage.
 */

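/*
 * table_rbtree_cmp_nodes: compare two entries by their masked addresses
 * (used for ordering on insertion).
 */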
static signed int
table_rbtree_cmp_nodes(void *ctx, const void *n1, const void *n2)
{
	const npf_tblent_t * const te1 = n1;
	const npf_tblent_t * const te2 = n2;

	return npf_addr_cmp(&te1->te_addr, te1->te_mask,
	    &te2->te_addr, te2->te_mask, sizeof(npf_addr_t));
}

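/*
 * table_rbtree_cmp_key: compare an entry against a lookup key, which is
 * a plain address (no netmask applied to the key).
 */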
static signed int
table_rbtree_cmp_key(void *ctx, const void *n1, const void *key)
{
	const npf_tblent_t * const te = n1;
	const npf_addr_t *t2 = key;

	return npf_addr_cmp(&te->te_addr, te->te_mask,
	    t2, NPF_NO_NETMASK, sizeof(npf_addr_t));
}

static const rb_tree_ops_t table_rbtree_ops = {
	.rbto_compare_nodes = table_rbtree_cmp_nodes,
	.rbto_compare_key = table_rbtree_cmp_key,
	.rbto_node_offset = offsetof(npf_tblent_t, te_entry.rbnode),
	.rbto_context = NULL
};

/*
 * table_hash_bucket: hash the given key buffer and return its bucket list.
 */
static inline struct npf_hashl *
table_hash_bucket(npf_table_t *t, const void *buf, size_t sz)
{
	const uint32_t hidx = hash32_buf(buf, sz, HASH32_BUF_INIT);

	return &t->t_hashl[hidx & t->t_hashmask];
}

/*
 * npf_table_create: create a table with the specified ID.
 */
npf_table_t *
npf_table_create(u_int tid, int type, size_t hsize)
{
	npf_table_t *t;

	KASSERT((u_int)tid < NPF_TABLE_SLOTS);

	t = kmem_zalloc(sizeof(npf_table_t), KM_SLEEP);
	switch (type) {
	case NPF_TABLE_TREE:
		rb_tree_init(&t->t_rbtree, &table_rbtree_ops);
		break;
	case NPF_TABLE_HASH:
		t->t_hashl = hashinit(hsize, HASH_LIST, true, &t->t_hashmask);
		if (t->t_hashl == NULL) {
			kmem_free(t, sizeof(npf_table_t));
			return NULL;
		}
		break;
	default:
		KASSERT(false);
	}
	rw_init(&t->t_lock);
	t->t_type = type;
	t->t_refcnt = 1;
	t->t_id = tid;
	return t;
}

/*
 * npf_table_destroy: free all table entries and the table itself.
 */
void
npf_table_destroy(npf_table_t *t)
{
	npf_tblent_t *e;
	u_int n;

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		for (n = 0; n <= t->t_hashmask; n++) {
			while ((e = LIST_FIRST(&t->t_hashl[n])) != NULL) {
				LIST_REMOVE(e, te_entry.hashq);
				pool_cache_put(tblent_cache, e);
			}
		}
		hashdone(t->t_hashl, HASH_LIST, t->t_hashmask);
		break;
	case NPF_TABLE_TREE:
		while ((e = rb_tree_iterate(&t->t_rbtree, NULL,
		    RB_DIR_LEFT)) != NULL) {
			rb_tree_remove_node(&t->t_rbtree, e);
			pool_cache_put(tblent_cache, e);
		}
		break;
	default:
		KASSERT(false);
	}
	rw_destroy(&t->t_lock);
	kmem_free(t, sizeof(npf_table_t));
}

/*
 * npf_table_ref: acquire a reference on the table.
 *
 * => Table must be locked.
 */
void
npf_table_ref(npf_table_t *t)
{

	KASSERT(rw_lock_held(&t->t_lock));
	atomic_inc_uint(&t->t_refcnt);
}

/*
 * npf_table_unref: drop a reference from the table and destroy the table
 * if it was the last one.
 */
void
npf_table_unref(npf_table_t *t)
{

	if (atomic_dec_uint_nv(&t->t_refcnt) != 0) {
		return;
	}
	npf_table_destroy(t);
}

/*
 * npf_table_get: find the table by ID and "get" it by acquiring its lock.
 */
npf_table_t *
npf_table_get(npf_tableset_t *tset, u_int tid)
{
	npf_table_t *t;

	KASSERT(tset != NULL);

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return NULL;
	}
	t = tset[tid];
	if (t != NULL) {
		rw_enter(&t->t_lock, RW_READER);
	}
	return t;
}

/*
 * npf_table_put: "put" the table back by releasing its lock.
 */
void
npf_table_put(npf_table_t *t)
{

	rw_exit(&t->t_lock);
}

/*
 * npf_table_check: validate the table ID and type.
 */
int
npf_table_check(npf_tableset_t *tset, u_int tid, int type)
{

	if ((u_int)tid >= NPF_TABLE_SLOTS) {
		return EINVAL;
	}
	if (tset[tid] != NULL) {
		return EEXIST;
	}
	if (type != NPF_TABLE_TREE && type != NPF_TABLE_HASH) {
		return EINVAL;
	}
	return 0;
}

/*
 * npf_table_add_cidr: add an IP CIDR into the table.
 */
int
npf_table_add_cidr(npf_tableset_t *tset, u_int tid,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *ent, *it;
	npf_table_t *t;
	npf_addr_t val;
	int error = 0;

	if (mask > NPF_MAX_NETMASK) {
		return EINVAL;
	}
	ent = pool_cache_get(tblent_cache, PR_WAITOK);
	memcpy(&ent->te_addr, addr, sizeof(npf_addr_t));
	ent->te_mask = mask;

	/* Get the table (acquire the lock). */
	t = npf_table_get(tset, tid);
	if (t == NULL) {
		pool_cache_put(tblent_cache, ent);
		return EINVAL;
	}

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate the hash value from: address & mask. */
		npf_addr_mask(addr, mask, sizeof(npf_addr_t), &val);
		htbl = table_hash_bucket(t, &val, sizeof(npf_addr_t));

		/* Lookup to check for duplicates. */
		LIST_FOREACH(it, htbl, te_entry.hashq) {
			if (it->te_mask != mask) {
				continue;
			}
			if (!memcmp(&it->te_addr, addr, sizeof(npf_addr_t))) {
				break;
			}
		}

		/* If no duplicate - insert the entry. */
		if (__predict_true(it == NULL)) {
			LIST_INSERT_HEAD(htbl, ent, te_entry.hashq);
		} else {
			error = EEXIST;
		}
		break;
	case NPF_TABLE_TREE:
		/*
		 * Insert the entry.  On duplicate, rb_tree_insert_node()
		 * returns the already existing entry.
		 */
		if (rb_tree_insert_node(&t->t_rbtree, ent) != ent) {
			error = EEXIST;
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	if (error) {
		pool_cache_put(tblent_cache, ent);
	}
	return error;
}

/*
 * npf_table_rem_cidr: remove an IP CIDR from the table.
 */
int
npf_table_rem_cidr(npf_tableset_t *tset, u_int tid,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	struct npf_hashl *htbl;
	npf_tblent_t *ent;
	npf_table_t *t;
	npf_addr_t val;

	if (mask > NPF_MAX_NETMASK) {
		return EINVAL;
	}

	/* Get the table (acquire the lock). */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}

	/* Key: (address & mask). */
	npf_addr_mask(addr, mask, sizeof(npf_addr_t), &val);
	ent = NULL;

	switch (t->t_type) {
	case NPF_TABLE_HASH:
		/* Generate hash value from: (address & mask). */
		htbl = table_hash_bucket(t, &val, sizeof(npf_addr_t));
		LIST_FOREACH(ent, htbl, te_entry.hashq) {
			if (ent->te_mask != mask) {
				continue;
			}
			if (!memcmp(&ent->te_addr, addr, sizeof(npf_addr_t))) {
				break;
			}
		}
		if (__predict_true(ent != NULL)) {
			LIST_REMOVE(ent, te_entry.hashq);
		}
		break;
	case NPF_TABLE_TREE:
		ent = rb_tree_find_node(&t->t_rbtree, &val);
		if (__predict_true(ent != NULL)) {
			rb_tree_remove_node(&t->t_rbtree, ent);
		}
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	if (ent == NULL) {
		return ENOENT;
	}
	pool_cache_put(tblent_cache, ent);
	return 0;
}

/*
 * npf_table_match_addr: find the table by ID and check whether the
 * given IP address matches any entry in it.
 */
int
npf_table_match_addr(npf_tableset_t *tset, u_int tid, const npf_addr_t *addr)
{
	struct npf_hashl *htbl;
	npf_tblent_t *ent = NULL;
	npf_table_t *t;

	/* Get the table (acquire the lock). */
	t = npf_table_get(tset, tid);
	if (__predict_false(t == NULL)) {
		return EINVAL;
	}
	switch (t->t_type) {
	case NPF_TABLE_HASH:
		htbl = table_hash_bucket(t, addr, sizeof(npf_addr_t));
		LIST_FOREACH(ent, htbl, te_entry.hashq) {
			if (npf_addr_cmp(addr, ent->te_mask, &ent->te_addr,
			    NPF_NO_NETMASK, sizeof(npf_addr_t)) == 0) {
				break;
			}
		}
		break;
	case NPF_TABLE_TREE:
		ent = rb_tree_find_node(&t->t_rbtree, addr);
		break;
	default:
		KASSERT(false);
	}
	npf_table_put(t);

	if (ent == NULL) {
		return ENOENT;
	}
	KASSERT(npf_addr_cmp(addr, ent->te_mask, &ent->te_addr,
	    NPF_NO_NETMASK, sizeof(npf_addr_t)) == 0);
	return 0;
}