/*	$NetBSD: npf_ruleset.c,v 1.1 2010/08/22 18:56:22 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 *
 * Lock order:
 *
 *	ruleset_lock -> table_lock -> npf_table_t::t_lock
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.1 2010/08/22 18:56:22 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#endif

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/types.h>

#include <net/if.h>
#include <net/pfil.h>

#include "npf_ncode.h"
#include "npf_impl.h"

struct npf_hook {
	void				(*hk_fn)(const npf_cache_t *, void *);
	void *				hk_arg;
	LIST_ENTRY(npf_hook)		hk_entry;
};

struct npf_ruleset {
	TAILQ_HEAD(, npf_rule)		rs_queue;
	npf_rule_t *			rs_default;
	int				_reserved;
};

/* Rule structure. */
struct npf_rule {
	/* List entry in the ruleset. */
	TAILQ_ENTRY(npf_rule)		r_entry;
	/* Optional: sub-ruleset, NAT policy. */
	npf_ruleset_t			r_subset;
	npf_natpolicy_t *		r_nat;
	/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
	u_int				r_priority;
	/* N-code to process. */
	void *				r_ncode;
	size_t				r_nc_size;
	/* Attributes of this rule. */
	int				r_attr;
	/* Interface. */
	u_int				r_ifid;
	/* Hit counter. */
	u_long				r_hitcount;
	/* List of hooks to process on match. */
	LIST_HEAD(, npf_hook)		r_hooks;
};
/* Global ruleset, its lock and rule cache. */
static npf_ruleset_t *			ruleset;
static krwlock_t			ruleset_lock;
static pool_cache_t			rule_cache;

/*
 * npf_ruleset_sysinit: initialise ruleset structures.
 */
int
npf_ruleset_sysinit(void)
{

	rule_cache = pool_cache_init(sizeof(npf_rule_t), coherency_unit,
	    0, 0, "npfrlpl", NULL, IPL_NONE, NULL, NULL, NULL);
	if (rule_cache == NULL) {
		return ENOMEM;
	}
	rw_init(&ruleset_lock);
	ruleset = npf_ruleset_create();
	return 0;
}

void
npf_ruleset_sysfini(void)
{

	npf_ruleset_destroy(ruleset);
	rw_destroy(&ruleset_lock);
	pool_cache_destroy(rule_cache);
}

npf_ruleset_t *
npf_ruleset_create(void)
{
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(sizeof(npf_ruleset_t), KM_SLEEP);
	TAILQ_INIT(&rlset->rs_queue);
	return rlset;
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = TAILQ_FIRST(&rlset->rs_queue)) != NULL) {
		TAILQ_REMOVE(&rlset->rs_queue, rl, r_entry);
		npf_rule_free(rl);
	}
	kmem_free(rlset, sizeof(npf_ruleset_t));
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 *
 * Note: multiple rules at the same priority are allowed.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	npf_rule_t *it;

	if (rl->r_attr & NPF_RULE_DEFAULT) {
		rlset->rs_default = rl;
		return;
	}
	TAILQ_FOREACH(it, &rlset->rs_queue, r_entry) {
		/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
		if (it->r_priority > rl->r_priority)
			break;
	}
	if (it == NULL) {
		TAILQ_INSERT_TAIL(&rlset->rs_queue, rl, r_entry);
	} else {
		TAILQ_INSERT_BEFORE(it, rl, r_entry);
	}
}
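
/*
 * Illustrative sketch (not part of the module): the insert above keeps
 * rules sorted by r_priority and only breaks on a strictly greater
 * priority, so a new rule lands after existing rules of equal priority.
 * The rule pointers below are hypothetical:
 *
 *	npf_ruleset_t *rs = npf_ruleset_create();
 *
 *	npf_ruleset_insert(rs, rl_pri0);	(priority 0)
 *	npf_ruleset_insert(rs, rl_pri2);	(priority 2)
 *	npf_ruleset_insert(rs, rl_pri1a);	(priority 1)
 *	npf_ruleset_insert(rs, rl_pri1b);	(priority 1, placed after rl_pri1a)
 *
 * Resulting queue order: rl_pri0, rl_pri1a, rl_pri1b, rl_pri2.
 */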

/*
 * npf_ruleset_reload: atomically load new ruleset and tableset,
 * and destroy old structures.
 */
void
npf_ruleset_reload(npf_ruleset_t *nrlset, npf_tableset_t *ntblset)
{
	npf_ruleset_t *oldrlset;
	npf_tableset_t *oldtblset;

	/*
	 * Swap old ruleset with the new.
	 * XXX: Rework to be fully lock-less; later.
	 */
	rw_enter(&ruleset_lock, RW_WRITER);
	oldrlset = atomic_swap_ptr(&ruleset, nrlset);

	/*
	 * Set up the new tableset.  npf_tableset_reload() takes the global
	 * tableset lock, which ensures atomicity.  The old tableset is
	 * freed below.
	 */
	oldtblset = npf_tableset_reload(ntblset);
	KASSERT(oldtblset != NULL);
	/* Unlock.  Everything goes "live" now. */
	rw_exit(&ruleset_lock);

	npf_tableset_destroy(oldtblset);
	npf_ruleset_destroy(oldrlset);
}

/*
 * npf_rule_alloc: allocate a rule, validating the given n-code (if any).
 */
npf_rule_t *
npf_rule_alloc(int attr, pri_t pri, int ifidx, void *nc, size_t sz)
{
	npf_rule_t *rl;
	int errat;

	/* Perform validation & building of n-code. */
	if (nc && npf_ncode_validate(nc, sz, &errat)) {
		return NULL;
	}
	/* Allocate a rule structure. */
	rl = pool_cache_get(rule_cache, PR_WAITOK);
	if (rl == NULL) {
		return NULL;
	}
	TAILQ_INIT(&rl->r_subset.rs_queue);
	LIST_INIT(&rl->r_hooks);
	rl->r_priority = pri;
	rl->r_attr = attr;
	rl->r_ifid = ifidx;
	rl->r_ncode = nc;
	rl->r_nc_size = sz;
	rl->r_hitcount = 0;
	rl->r_nat = NULL;
	return rl;
}
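
/*
 * Illustrative sketch (not part of the module): building a new ruleset
 * and making it live via npf_ruleset_reload().  The attribute/priority
 * values and the "ntblset" tableset are hypothetical; the n-code (NULL
 * here) would normally be supplied and validated from user-space.
 *
 *	npf_ruleset_t *nrlset = npf_ruleset_create();
 *	npf_rule_t *rl;
 *
 *	rl = npf_rule_alloc(NPF_RULE_PASS | NPF_RULE_IN, 1, 0, NULL, 0);
 *	npf_ruleset_insert(nrlset, rl);
 *	npf_ruleset_reload(nrlset, ntblset);
 *
 * Note: an interface index of 0 matches any interface, as per the
 * r_ifid check in npf_ruleset_match().
 */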
#if 0
/*
 * npf_activate_rule: activate rule by inserting it into the global ruleset.
 */
void
npf_activate_rule(npf_rule_t *rl)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	npf_ruleset_insert(ruleset, rl);
	rw_exit(&ruleset_lock);
}

/*
 * npf_deactivate_rule: deactivate rule by removing it from the ruleset.
 */
void
npf_deactivate_rule(npf_rule_t *rl)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	TAILQ_REMOVE(&ruleset->rs_queue, rl, r_entry);
	rw_exit(&ruleset_lock);
}
#endif

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{

	if (rl->r_ncode) {
		/* Free n-code (if any). */
		npf_ncode_free(rl->r_ncode, rl->r_nc_size);
	}
	if (rl->r_nat) {
		/* Free NAT policy (if associated). */
		npf_nat_freepolicy(rl->r_nat);
	}
	pool_cache_put(rule_cache, rl);
}

/*
 * npf_rule_subset: return sub-ruleset, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 * npf_rule_setnat: assign NAT policy to the rule.
 */

npf_ruleset_t *
npf_rule_subset(npf_rule_t *rl)
{
	return &rl->r_subset;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_nat;
}

void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	rl->r_nat = np;
}

/*
 * npf_hook_register: register action hook in the rule.
 */
npf_hook_t *
npf_hook_register(npf_rule_t *rl,
    void (*fn)(const npf_cache_t *, void *), void *arg)
{
	npf_hook_t *hk;

	hk = kmem_alloc(sizeof(npf_hook_t), KM_SLEEP);
	if (hk != NULL) {
		hk->hk_fn = fn;
		hk->hk_arg = arg;
		rw_enter(&ruleset_lock, RW_WRITER);
		LIST_INSERT_HEAD(&rl->r_hooks, hk, hk_entry);
		rw_exit(&ruleset_lock);
	}
	return hk;
}
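
/*
 * Illustrative sketch (not part of the module): attaching a hook to a
 * rule.  Registered hooks are run by npf_rule_apply() once the rule has
 * matched and is passing the packet.  The callback name is hypothetical:
 *
 *	static void
 *	example_hook(const npf_cache_t *npc, void *arg)
 *	{
 *		(account, log, etc.)
 *	}
 *
 *	npf_hook_t *hk = npf_hook_register(rl, example_hook, NULL);
 *	...
 *	npf_hook_unregister(rl, hk);
 */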

/*
 * npf_hook_unregister: unregister a specified hook.
 *
 * => Hook should have been registered in the rule.
 */
void
npf_hook_unregister(npf_rule_t *rl, npf_hook_t *hk)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	LIST_REMOVE(hk, hk_entry);
	rw_exit(&ruleset_lock);
	kmem_free(hk, sizeof(npf_hook_t));
}

/*
 * npf_ruleset_match: inspect the packet against the ruleset.
 *
 * Loop through the rules in the set and run the n-code processor of each
 * rule against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
 *
 * => If found, ruleset is kept read-locked.
 * => Caller should protect the nbuf chain.
 */
npf_rule_t *
npf_ruleset_match(npf_ruleset_t *rlset0, npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;
	npf_ruleset_t *rlset = rlset0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
reinspect:
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);

		/* Match the interface. */
		if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
			continue;
		}
		/* Match the direction. */
		if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
			const int di_mask =
			    (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;

			if ((rl->r_attr & di_mask) == 0)
				continue;
		}
		/* Process the n-code, if any. */
		const void *nc = rl->r_ncode;
		if (nc && npf_ncode_process(npc, nc, nbuf, layer)) {
			continue;
		}
		/* Set the matching rule and check for "final". */
		final_rl = rl;
		if (rl->r_attr & NPF_RULE_FINAL) {
			goto final;
		}
	}
	/* Default, if no final rule. */
	if (final_rl == NULL) {
		rlset = rlset0;
		final_rl = rlset->rs_default;
	}
	/* Inspect the sub-ruleset, if any. */
	if (final_rl) {
final:
		if (TAILQ_EMPTY(&final_rl->r_subset.rs_queue)) {
			return final_rl;
		}
		rlset = &final_rl->r_subset;
		final_rl = NULL;
		goto reinspect;
	}
	return final_rl;
}
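
/*
 * Illustrative note (not part of the module): if a matching rule in the
 * top-level ruleset carries a non-empty sub-ruleset, the scan above
 * descends into it and repeats.  Should nothing in the sub-ruleset
 * match, final_rl remains NULL and the default rule of the top-level
 * ruleset (rlset0->rs_default) is taken instead.
 */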

/*
 * npf_ruleset_inspect: inspection of the main ruleset for filtering.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_rule_t *rl;

	rw_enter(&ruleset_lock, RW_READER);
	rl = npf_ruleset_match(ruleset, npc, nbuf, ifp, di, layer);
	if (rl == NULL) {
		rw_exit(&ruleset_lock);
	}
	return rl;
}

/*
 * npf_rule_apply: apply the rule, i.e. run its hooks, and return the result.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 * => Releases the ruleset lock.
 */
int
npf_rule_apply(const npf_cache_t *npc, npf_rule_t *rl, bool *keepstate)
{
	npf_hook_t *hk;

	KASSERT(rw_lock_held(&ruleset_lock));

	/* Update the "hit" counter. */
	if (rl->r_attr & NPF_RULE_COUNT) {
		atomic_inc_ulong(&rl->r_hitcount);
	}

	/* If not passing - drop the packet. */
	if ((rl->r_attr & NPF_RULE_PASS) == 0) {
		rw_exit(&ruleset_lock);
		return ENETUNREACH;
	}

	/* Passing.  Run the hooks. */
	LIST_FOREACH(hk, &rl->r_hooks, hk_entry) {
		KASSERT(hk->hk_fn != NULL);
		(*hk->hk_fn)(npc, hk->hk_arg);
	}
	*keepstate = (rl->r_attr & NPF_RULE_KEEPSTATE) != 0;
	rw_exit(&ruleset_lock);

	return 0;
}
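
/*
 * Illustrative sketch (not part of the module): the inspect/apply pair
 * and its lock hand-off.  npf_ruleset_inspect() returns with the
 * ruleset read-locked only if a rule was found; npf_rule_apply() then
 * releases that lock on both the "pass" and "block" paths.
 *
 *	bool keepstate;
 *	int error;
 *
 *	npf_rule_t *rl = npf_ruleset_inspect(npc, nbuf, ifp, di, layer);
 *	if (rl != NULL) {
 *		error = npf_rule_apply(npc, rl, &keepstate);
 *		(error == 0 means "pass", ENETUNREACH means "block")
 *	}
 */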

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(npf_rule_t *rl)
{
	uint32_t *op = rl->r_ncode;
	size_t n = rl->r_nc_size;

	do {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	} while (n);

	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif