npf_ruleset.c revision 1.30.2.1
      1 /*	$NetBSD: npf_ruleset.c,v 1.30.2.1 2014/08/10 06:56:16 tls Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This material is based upon work partially supported by The
      8  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * NPF ruleset module.
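          *
          * Interface sketch (illustrative only; in NPF these calls are made
          * from the configuration-load and packet-inspection paths, under
          * the appropriate npf_config locks, and the variable names below
          * are hypothetical):
          *
          *	npf_ruleset_t *rlset = npf_ruleset_create(nitems);
          *	npf_rule_t *rl = npf_rule_alloc(rldict);
          *	npf_rule_setcode(rl, NPF_CODE_BPF, code, clen);
          *	npf_ruleset_insert(rlset, rl);
          *	...
          *	rl = npf_ruleset_inspect(npc, rlset, di, layer);
          *	if (rl != NULL)
          *		error = npf_rule_conclude(rl, &retfl);
          *	npf_ruleset_destroy(rlset);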
     34  */
     35 
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.30.2.1 2014/08/10 06:56:16 tls Exp $");
     38 
     39 #include <sys/param.h>
     40 #include <sys/types.h>
     41 
     42 #include <sys/atomic.h>
     43 #include <sys/kmem.h>
     44 #include <sys/queue.h>
     45 #include <sys/mbuf.h>
     47 
     48 #include <net/bpf.h>
     49 #include <net/bpfjit.h>
     50 #include <net/pfil.h>
     51 #include <net/if.h>
     52 
     53 #include "npf_impl.h"
     54 
     55 struct npf_ruleset {
     56 	/*
     57 	 * - List of all rules.
     58 	 * - Dynamic (i.e. named) rules.
     59 	 * - G/C list for convenience.
     60 	 */
     61 	LIST_HEAD(, npf_rule)	rs_all;
     62 	LIST_HEAD(, npf_rule)	rs_dynamic;
     63 	LIST_HEAD(, npf_rule)	rs_gc;
     64 
     65 	/* Unique ID counter. */
     66 	uint64_t		rs_idcnt;
     67 
     68 	/* Number of array slots and active rules. */
     69 	u_int			rs_slots;
     70 	u_int			rs_nitems;
     71 
     72 	/* Array of ordered rules. */
     73 	npf_rule_t *		rs_rules[];
     74 };
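
         /*
          * Ruleset organisation: rs_rules[] is the ordered array of rules
          * which npf_ruleset_inspect() scans; dynamic (named) groups are
          * additionally chained on rs_dynamic and keep their sub-rules on
          * a per-group queue (r_subset).  Every rule is linked on rs_all,
          * and rules unlinked at run-time are staged on rs_gc until
          * npf_ruleset_gc() destroys them.
          */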
     75 
     76 struct npf_rule {
     77 	/* Attributes, interface and skip slot. */
     78 	uint32_t		r_attr;
     79 	u_int			r_ifid;
     80 	u_int			r_skip_to;
     81 
     82 	/* Code to process, if any. */
     83 	int			r_type;
     84 	bpfjit_func_t		r_jcode;
     85 	void *			r_code;
     86 	size_t			r_clen;
     87 
     88 	/* NAT policy (optional), rule procedure and subset. */
     89 	npf_natpolicy_t *	r_natp;
     90 	npf_rproc_t *		r_rproc;
     91 
     92 	/* Rule priority: (highest) 1, 2 ... n (lowest). */
     93 	pri_t			r_priority;
     94 
     95 	/*
     96 	 * Dynamic group: subset queue and a dynamic group list entry.
     97 	 * Dynamic rule: entry and the parent rule (the group).
     98 	 */
     99 	union {
    100 		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
    101 		TAILQ_ENTRY(npf_rule)	r_entry;
    102 	} /* C11 */;
    103 	union {
    104 		LIST_ENTRY(npf_rule)	r_dentry;
    105 		npf_rule_t *		r_parent;
    106 	} /* C11 */;
    107 
    108 	/* Rule ID and the original dictionary. */
    109 	uint64_t		r_id;
    110 	prop_dictionary_t	r_dict;
    111 
    112 	/* Rule name and all-list entry. */
    113 	char			r_name[NPF_RULE_MAXNAMELEN];
    114 	LIST_ENTRY(npf_rule)	r_aentry;
    115 
    116 	/* Key (optional). */
    117 	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
    118 };
    119 
    120 /*
    121  * Private attributes - must be in the NPF_RULE_PRIVMASK range.
    122  */
    123 #define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)
    124 
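         /*
          * Attribute predicates: a dynamic group has both the group and
          * the dynamic bits set, whereas a dynamic rule has only the
          * dynamic bit within that mask (this assumes NPF_DYNAMIC_GROUP
          * combines NPF_RULE_GROUP and NPF_RULE_DYNAMIC, as the
          * comparisons imply).
          */
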
    125 #define	NPF_DYNAMIC_GROUP_P(attr) \
    126     (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)
    127 
    128 #define	NPF_DYNAMIC_RULE_P(attr) \
    129     (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
    130 
    131 npf_ruleset_t *
    132 npf_ruleset_create(size_t slots)
    133 {
    134 	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
    135 	npf_ruleset_t *rlset;
    136 
    137 	rlset = kmem_zalloc(len, KM_SLEEP);
    138 	LIST_INIT(&rlset->rs_dynamic);
    139 	LIST_INIT(&rlset->rs_all);
    140 	LIST_INIT(&rlset->rs_gc);
    141 	rlset->rs_slots = slots;
    142 
    143 	return rlset;
    144 }
    145 
    146 static void
    147 npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
    148 {
    149 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
    150 		LIST_REMOVE(rl, r_dentry);
    151 	}
    152 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
    153 		npf_rule_t *rg = rl->r_parent;
    154 		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
    155 	}
    156 	LIST_REMOVE(rl, r_aentry);
    157 }
    158 
    159 void
    160 npf_ruleset_destroy(npf_ruleset_t *rlset)
    161 {
    162 	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
    163 	npf_rule_t *rl;
    164 
    165 	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
    166 		npf_ruleset_unlink(rlset, rl);
    167 		npf_rule_free(rl);
    168 	}
    169 	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
    170 	KASSERT(LIST_EMPTY(&rlset->rs_gc));
    171 	kmem_free(rlset, len);
    172 }
    173 
    174 /*
    175  * npf_ruleset_insert: insert the rule into the specified ruleset.
    176  */
    177 void
    178 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
    179 {
    180 	u_int n = rlset->rs_nitems;
    181 
    182 	KASSERT(n < rlset->rs_slots);
    183 
    184 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
    185 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
    186 		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
    187 	} else {
    188 		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
    189 		rl->r_attr &= ~NPF_RULE_DYNAMIC;
    190 	}
    191 
    192 	rlset->rs_rules[n] = rl;
    193 	rlset->rs_nitems++;
    194 
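         	/*
         	 * Ensure the skip-to index points past this rule, so that
         	 * the inspection loop always advances on a non-match.
         	 */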
    195 	if (rl->r_skip_to < ++n) {
    196 		rl->r_skip_to = n;
    197 	}
    198 }
    199 
    200 static npf_rule_t *
    201 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
    202 {
    203 	npf_rule_t *rl;
    204 
    205 	KASSERT(npf_config_locked_p());
    206 
    207 	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
    208 		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
    209 		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
    210 			break;
    211 	}
    212 	return rl;
    213 }
    214 
    215 int
    216 npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
    217 {
    218 	npf_rule_t *rg, *it;
    219 	pri_t priocmd;
    220 
    221 	rg = npf_ruleset_lookup(rlset, rname);
    222 	if (rg == NULL) {
    223 		return ESRCH;
    224 	}
    225 	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
    226 		return EINVAL;
    227 	}
    228 
    229 	/* Dynamic rule - assign a unique ID and save the parent. */
    230 	rl->r_id = ++rlset->rs_idcnt;
    231 	rl->r_parent = rg;
    232 
    233 	/*
    234 	 * Rule priority: (highest) 1, 2 ... n (lowest).
     235 	 * A negative priority is a placement command (NPF_PRI_FIRST or
         	 * NPF_PRI_LAST) and is reset to zero before insertion.
    236 	 */
    237 	if ((priocmd = rl->r_priority) < 0) {
    238 		rl->r_priority = 0;
    239 	}
    240 
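         	/*
         	 * Insert according to the priority.  NPF_PRI_FIRST places the
         	 * rule ahead of existing rules with the same priority, while
         	 * the default (NPF_PRI_LAST) case compares strictly and thus
         	 * appends it after them.
         	 */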
    241 	switch (priocmd) {
    242 	case NPF_PRI_FIRST:
    243 		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
    244 			if (rl->r_priority <= it->r_priority)
    245 				break;
    246 		}
    247 		if (it) {
    248 			TAILQ_INSERT_BEFORE(it, rl, r_entry);
    249 		} else {
    250 			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
    251 		}
    252 		break;
    253 	case NPF_PRI_LAST:
    254 	default:
    255 		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
    256 			if (rl->r_priority < it->r_priority)
    257 				break;
    258 		}
    259 		if (it) {
    260 			TAILQ_INSERT_BEFORE(it, rl, r_entry);
    261 		} else {
    262 			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
    263 		}
    264 		break;
    265 	}
    266 
    267 	/* Finally, add into the all-list. */
    268 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
    269 	return 0;
    270 }
    271 
    272 int
    273 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
    274 {
    275 	npf_rule_t *rg, *rl;
    276 
    277 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    278 		return ESRCH;
    279 	}
    280 	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
    281 		KASSERT(rl->r_parent == rg);
    282 
    283 		/* Compare ID.  On match, remove and return. */
    284 		if (rl->r_id == id) {
    285 			npf_ruleset_unlink(rlset, rl);
    286 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    287 			return 0;
    288 		}
    289 	}
    290 	return ENOENT;
    291 }
    292 
    293 int
    294 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    295     const void *key, size_t len)
    296 {
    297 	npf_rule_t *rg, *rl;
    298 
    299 	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
    300 
    301 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    302 		return ESRCH;
    303 	}
    304 
    305 	/* Find the last in the list. */
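         	/*
         	 * Note: only the first 'len' bytes of r_key are compared; the
         	 * key was zero-filled on allocation, so a shorter key matches
         	 * against the trailing zero padding.
         	 */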
    306 	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
    307 		KASSERT(rl->r_parent == rg);
    308 
    309 		/* Compare the key.  On match, remove and return. */
    310 		if (memcmp(rl->r_key, key, len) == 0) {
    311 			npf_ruleset_unlink(rlset, rl);
    312 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    313 			return 0;
    314 		}
    315 	}
    316 	return ENOENT;
    317 }
    318 
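         /*
          * npf_ruleset_list: return a dictionary with a "rules" array built
          * from the saved dictionaries (r_dict) of the dynamic rules in the
          * named group, or NULL on failure.
          */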
    319 prop_dictionary_t
    320 npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
    321 {
    322 	prop_dictionary_t rldict;
    323 	prop_array_t rules;
    324 	npf_rule_t *rg, *rl;
    325 
    326 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    327 		return NULL;
    328 	}
    329 	if ((rldict = prop_dictionary_create()) == NULL) {
    330 		return NULL;
    331 	}
    332 	if ((rules = prop_array_create()) == NULL) {
    333 		prop_object_release(rldict);
    334 		return NULL;
    335 	}
    336 
    337 	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
    338 		KASSERT(rl->r_parent == rg);
    339 		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
    340 			prop_object_release(rldict);
    341 			prop_object_release(rules);
    342 			return NULL;
    343 		}
    344 	}
    345 
    346 	if (!prop_dictionary_set(rldict, "rules", rules)) {
    347 		prop_object_release(rldict);
    348 		rldict = NULL;
    349 	}
    350 	prop_object_release(rules);
    351 	return rldict;
    352 }
    353 
    354 int
    355 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
    356 {
    357 	npf_rule_t *rg, *rl;
    358 
    359 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    360 		return ESRCH;
    361 	}
    362 	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
    363 		KASSERT(rl->r_parent == rg);
    364 		npf_ruleset_unlink(rlset, rl);
    365 		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    366 	}
    367 	return 0;
    368 }
    369 
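         /*
          * npf_ruleset_gc: free the rules that were unlinked and staged on
          * the G/C list by the remove/remkey/flush operations.  The caller
          * is expected to run this only once the unlinked rules can no
          * longer be reached by concurrent lookups; that synchronisation is
          * the caller's responsibility and is not visible in this file.
          */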
    370 void
    371 npf_ruleset_gc(npf_ruleset_t *rlset)
    372 {
    373 	npf_rule_t *rl;
    374 
    375 	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
    376 		LIST_REMOVE(rl, r_aentry);
    377 		npf_rule_free(rl);
    378 	}
    379 }
    380 
    381 /*
    382  * npf_ruleset_cmpnat: find a matching NAT policy in the ruleset.
    383  */
    384 static inline npf_rule_t *
    385 npf_ruleset_cmpnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
    386 {
    387 	npf_rule_t *rl;
    388 
    389 	/* Find a matching NAT policy in the old ruleset. */
    390 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    391 		if (rl->r_natp && npf_nat_cmppolicy(rl->r_natp, mnp))
    392 			break;
    393 	}
    394 	return rl;
    395 }
    396 
    397 /*
    398  * npf_ruleset_reload: prepare the new ruleset by scanning the active
    399  * ruleset and 1) sharing the dynamic rules 2) sharing NAT policies.
    400  *
    401  * => The active (old) ruleset should be exclusively locked.
    402  */
    403 void
    404 npf_ruleset_reload(npf_ruleset_t *newset, npf_ruleset_t *oldset)
    405 {
    406 	npf_rule_t *rg, *rl;
    407 	uint64_t nid = 0;
    408 
    409 	KASSERT(npf_config_locked_p());
    410 
    411 	/*
    412 	 * Scan the dynamic rules and share (migrate) if needed.
    413 	 */
    414 	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
    415 		npf_rule_t *actrg;
    416 
     417 		/* Look for a dynamic ruleset group with the same name. */
    418 		actrg = npf_ruleset_lookup(oldset, rg->r_name);
    419 		if (actrg == NULL) {
    420 			continue;
    421 		}
    422 
    423 		/*
    424 		 * Copy the list-head structure.  This is necessary because
    425 		 * the rules are still active and therefore accessible for
    426 		 * inspection via the old ruleset.
    427 		 */
    428 		memcpy(&rg->r_subset, &actrg->r_subset, sizeof(rg->r_subset));
    429 		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
    430 			/*
    431 			 * We can safely migrate to the new all-rule list
    432 			 * and re-set the parent rule, though.
    433 			 */
    434 			LIST_REMOVE(rl, r_aentry);
    435 			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
    436 			rl->r_parent = rg;
    437 		}
    438 	}
    439 
    440 	/*
    441 	 * Scan all rules in the new ruleset and share NAT policies.
    442 	 * Also, assign a unique ID for each policy here.
    443 	 */
    444 	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
    445 		npf_natpolicy_t *np;
    446 		npf_rule_t *actrl;
    447 
    448 		/* Does the rule have a NAT policy associated? */
    449 		if ((np = rl->r_natp) == NULL) {
    450 			continue;
    451 		}
    452 
    453 		/* Does it match with any policy in the active ruleset? */
    454 		if ((actrl = npf_ruleset_cmpnat(oldset, np)) == NULL) {
    455 			npf_nat_setid(np, ++nid);
    456 			continue;
    457 		}
    458 
    459 		/*
    460 		 * Inherit the matching NAT policy and check other ones
    461 		 * in the new ruleset for sharing the portmap.
    462 		 */
    463 		rl->r_natp = actrl->r_natp;
    464 		npf_ruleset_sharepm(newset, rl->r_natp);
    465 		npf_nat_setid(rl->r_natp, ++nid);
    466 
     467 		/*
     468 		 * Finally, mark the active rule so that its NAT policy is
     469 		 * not destroyed later, since we inherited it (the rule must
     470 		 * stay active for now).  Destroy the new, unused policy.
     471 		 */
    472 		actrl->r_attr |= NPF_RULE_KEEPNAT;
    473 		npf_nat_freepolicy(np);
    474 	}
    475 
    476 	/* Inherit the ID counter. */
    477 	newset->rs_idcnt = oldset->rs_idcnt;
    478 }
    479 
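         /*
          * npf_ruleset_sharepm: scan the ruleset for another NAT policy
          * which can share its portmap with the policy 'mnp' (see
          * npf_nat_sharepm()); returns the rule carrying it, if any.
          */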
    480 npf_rule_t *
    481 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
    482 {
    483 	npf_natpolicy_t *np;
    484 	npf_rule_t *rl;
    485 
     486 	/* Scan the ruleset for a NAT policy which can share the portmap. */
    487 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
     488 		/*
     489 		 * The NAT policy might not be set yet while the ruleset is
     490 		 * being created (then the rule carries our policy), or the
     491 		 * policies might be equal due to the rule exchange on reload.
     492 		 */
    493 		np = rl->r_natp;
    494 		if (np == NULL || np == mnp)
    495 			continue;
    496 		if (npf_nat_sharepm(np, mnp))
    497 			break;
    498 	}
    499 	return rl;
    500 }
    501 
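         /*
          * npf_ruleset_findnat: find a NAT policy in the ruleset by its ID.
          */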
    502 npf_natpolicy_t *
    503 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
    504 {
    505 	npf_rule_t *rl;
    506 
    507 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    508 		npf_natpolicy_t *np = rl->r_natp;
    509 		if (np && npf_nat_getid(np) == id) {
    510 			return np;
    511 		}
    512 	}
    513 	return NULL;
    514 }
    515 
    516 /*
    517  * npf_ruleset_freealg: inspect the ruleset and disassociate specified
    518  * ALG from all NAT entries using it.
    519  */
    520 void
    521 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
    522 {
    523 	npf_rule_t *rl;
    524 	npf_natpolicy_t *np;
    525 
    526 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    527 		if ((np = rl->r_natp) != NULL) {
    528 			npf_nat_freealg(np, alg);
    529 		}
    530 	}
    531 }
    532 
    533 /*
    534  * npf_rule_alloc: allocate a rule and initialise it.
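          *
          * The dictionary may carry the optional "name", "attributes",
          * "priority", "interface", "skip-to" and "key" properties.  A
          * hand-built sketch (illustrative only; the dictionary normally
          * comes from the npfctl/libnpf control path):
          *
          *	prop_dictionary_t d = prop_dictionary_create();
          *	prop_dictionary_set_cstring(d, "name", "example-group");
          *	prop_dictionary_set_uint32(d, "attributes", attr);
          *	prop_dictionary_set_int32(d, "priority", 1);
          *	npf_rule_t *rl = npf_rule_alloc(d);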
    535  */
    536 npf_rule_t *
    537 npf_rule_alloc(prop_dictionary_t rldict)
    538 {
    539 	npf_rule_t *rl;
    540 	const char *rname;
    541 
    542 	/* Allocate a rule structure. */
    543 	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
    544 	TAILQ_INIT(&rl->r_subset);
    545 	rl->r_natp = NULL;
    546 
    547 	/* Name (optional) */
    548 	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
    549 		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
    550 	} else {
    551 		rl->r_name[0] = '\0';
    552 	}
    553 
    554 	/* Attributes, priority and interface ID (optional). */
    555 	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
    556 	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
    557 	rl->r_attr &= ~NPF_RULE_PRIVMASK;
    558 
    559 	if (prop_dictionary_get_cstring_nocopy(rldict, "interface", &rname)) {
    560 		if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
    561 			kmem_free(rl, sizeof(npf_rule_t));
    562 			return NULL;
    563 		}
    564 	} else {
    565 		rl->r_ifid = 0;
    566 	}
    567 
     568 	/*
         	 * Get the skip-to index.  It need not be validated here:
         	 * npf_ruleset_insert() clamps it to at least the next slot,
         	 * and the inspection loop is bounded by rs_nitems.
         	 */
    569 	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);
    570 
    571 	/* Key (optional). */
    572 	prop_object_t obj = prop_dictionary_get(rldict, "key");
    573 	const void *key = prop_data_data_nocopy(obj);
    574 
    575 	if (key) {
    576 		size_t len = prop_data_size(obj);
    577 		if (len > NPF_RULE_MAXKEYLEN) {
    578 			kmem_free(rl, sizeof(npf_rule_t));
    579 			return NULL;
    580 		}
    581 		memcpy(rl->r_key, key, len);
    582 	}
    583 
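         	/*
         	 * Only dynamic rules keep a copy of the dictionary; it is
         	 * later exported by npf_ruleset_list().
         	 */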
    584 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
    585 		rl->r_dict = prop_dictionary_copy(rldict);
    586 	}
    587 
    588 	return rl;
    589 }
    590 
    591 /*
    592  * npf_rule_setcode: assign filter code to the rule.
    593  *
    594  * => The code must be validated by the caller.
    595  * => JIT compilation may be performed here.
    596  */
    597 void
    598 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
    599 {
    600 	KASSERT(type == NPF_CODE_BPF);
    601 
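         	/*
         	 * Try to JIT-compile the byte-code.  On success only the
         	 * native code is kept; otherwise the byte-code and its length
         	 * are stored and npf_bpf_filter() will interpret it.
         	 */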
    602 	if ((rl->r_jcode = npf_bpf_compile(code, size)) == NULL) {
    603 		rl->r_code = code;
    604 		rl->r_clen = size;
    605 	} else {
    606 		rl->r_code = NULL;
    607 	}
    608 	rl->r_type = type;
    609 }
    610 
    611 /*
    612  * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
    613  */
    614 void
    615 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
    616 {
    617 	npf_rproc_acquire(rp);
    618 	rl->r_rproc = rp;
    619 }
    620 
    621 /*
    622  * npf_rule_free: free the specified rule.
    623  */
    624 void
    625 npf_rule_free(npf_rule_t *rl)
    626 {
    627 	npf_natpolicy_t *np = rl->r_natp;
    628 	npf_rproc_t *rp = rl->r_rproc;
    629 
    630 	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
    631 		/* Free NAT policy. */
    632 		npf_nat_freepolicy(np);
    633 	}
    634 	if (rp) {
    635 		/* Release rule procedure. */
    636 		npf_rproc_release(rp);
    637 	}
    638 	if (rl->r_code) {
    639 		/* Free byte-code. */
    640 		kmem_free(rl->r_code, rl->r_clen);
    641 	}
    642 	if (rl->r_jcode) {
    643 		/* Free JIT code. */
    644 		bpf_jit_freecode(rl->r_jcode);
    645 	}
    646 	if (rl->r_dict) {
    647 		/* Destroy the dictionary. */
    648 		prop_object_release(rl->r_dict);
    649 	}
    650 	kmem_free(rl, sizeof(npf_rule_t));
    651 }
    652 
    653 /*
    654  * npf_rule_getid: return the unique ID of a rule.
    655  * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
    656  * npf_rule_getnat: get NAT policy assigned to the rule.
    657  */
    658 
    659 uint64_t
    660 npf_rule_getid(const npf_rule_t *rl)
    661 {
    662 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
    663 	return rl->r_id;
    664 }
    665 
    666 npf_rproc_t *
    667 npf_rule_getrproc(const npf_rule_t *rl)
    668 {
    669 	npf_rproc_t *rp = rl->r_rproc;
    670 
    671 	if (rp) {
    672 		npf_rproc_acquire(rp);
    673 	}
    674 	return rp;
    675 }
    676 
    677 npf_natpolicy_t *
    678 npf_rule_getnat(const npf_rule_t *rl)
    679 {
    680 	return rl->r_natp;
    681 }
    682 
    683 /*
    684  * npf_rule_setnat: assign NAT policy to the rule and insert into the
    685  * NAT policy list in the ruleset.
    686  */
    687 void
    688 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
    689 {
    690 	KASSERT(rl->r_natp == NULL);
    691 	rl->r_natp = np;
    692 }
    693 
    694 /*
    695  * npf_rule_inspect: match the interface, direction and run the filter code.
    696  * Returns true if rule matches and false otherwise.
    697  */
    698 static inline bool
    699 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
    700     const int di_mask, const u_int ifid)
    701 {
    702 	/* Match the interface. */
    703 	if (rl->r_ifid && rl->r_ifid != ifid) {
    704 		return false;
    705 	}
    706 
    707 	/* Match the direction. */
    708 	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
    709 		if ((rl->r_attr & di_mask) == 0)
    710 			return false;
    711 	}
    712 
    713 	/* Any code? */
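         	/*
         	 * The two pointers can only be equal when both are NULL,
         	 * since npf_rule_setcode() stores exactly one of them; such
         	 * a rule carries no filter code and matches unconditionally.
         	 */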
    714 	if (rl->r_jcode == rl->r_code) {
    715 		KASSERT(rl->r_jcode == NULL);
    716 		KASSERT(rl->r_code == NULL);
    717 		return true;
    718 	}
    719 	KASSERT(rl->r_type == NPF_CODE_BPF);
    720 	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
    721 }
    722 
    723 /*
    724  * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
    725  * This is only for the dynamic rules.  Subrules cannot have nested rules.
    726  */
    727 static npf_rule_t *
    728 npf_rule_reinspect(const npf_rule_t *drl, bpf_args_t *bc_args,
    729     const int di_mask, const u_int ifid)
    730 {
    731 	npf_rule_t *final_rl = NULL, *rl;
    732 
    733 	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));
    734 
    735 	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
    736 		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
    737 			continue;
    738 		}
    739 		if (rl->r_attr & NPF_RULE_FINAL) {
    740 			return rl;
    741 		}
    742 		final_rl = rl;
    743 	}
    744 	return final_rl;
    745 }
    746 
    747 /*
    748  * npf_ruleset_inspect: inspect the packet against the given ruleset.
    749  *
    750  * Loop through the rules in the set and run the byte-code of each rule
    751  * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
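          *
          * The rules in rs_rules[] are scanned in order.  A rule which does
          * not match makes the scan jump to its r_skip_to index, which lets
          * a whole range of subordinate rules be bypassed at once.  A group
          * also acts as a barrier: once a match has been recorded, reaching
          * the next group terminates the scan.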
    752  */
    753 npf_rule_t *
    754 npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    755     const int di, const int layer)
    756 {
    757 	nbuf_t *nbuf = npc->npc_nbuf;
    758 	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
    759 	const u_int nitems = rlset->rs_nitems;
    760 	const u_int ifid = nbuf->nb_ifid;
    761 	npf_rule_t *final_rl = NULL;
    762 	bpf_args_t bc_args;
    763 	u_int n = 0;
    764 
    765 	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
    766 
    767 	/*
    768 	 * Prepare the external memory store and the arguments for
    769 	 * the BPF programs to be executed.
    770 	 */
    771 	uint32_t bc_words[NPF_BPF_NWORDS];
    772 	npf_bpf_prepare(npc, &bc_args, bc_words);
    773 
    774 	while (n < nitems) {
    775 		npf_rule_t *rl = rlset->rs_rules[n];
    776 		const u_int skip_to = rl->r_skip_to;
    777 		const uint32_t attr = rl->r_attr;
    778 
    779 		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
    780 		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
    781 		KASSERT(n < skip_to);
    782 
     783 		/* A group is a barrier: stop and return the match found so far, if any. */
    784 		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
    785 			break;
    786 		}
    787 
    788 		/* Main inspection of the rule. */
    789 		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
    790 			n = skip_to;
    791 			continue;
    792 		}
    793 
    794 		if (NPF_DYNAMIC_GROUP_P(attr)) {
    795 			/*
    796 			 * If this is a dynamic rule, re-inspect the subrules.
    797 			 * If it has any matching rule, then it is final.
    798 			 */
    799 			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
    800 			if (rl != NULL) {
    801 				final_rl = rl;
    802 				break;
    803 			}
    804 		} else if ((attr & NPF_RULE_GROUP) == 0) {
    805 			/*
     806 			 * Groups themselves never match; only ordinary rules do.
    807 			 */
    808 			final_rl = rl;
    809 		}
    810 
     811 		/* Stop if this rule is marked "final". */
    812 		if (attr & NPF_RULE_FINAL) {
    813 			break;
    814 		}
    815 		n++;
    816 	}
    817 
    818 	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
    819 	return final_rl;
    820 }
    821 
    822 /*
    823  * npf_rule_conclude: return decision and the flags for conclusion.
    824  *
    825  * => Returns ENETUNREACH if "block" and 0 if "pass".
    826  */
    827 int
    828 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
    829 {
    830 	/* If not passing - drop the packet. */
    831 	*retfl = rl->r_attr;
    832 	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
    833 }
    834