Home | History | Annotate | Line # | Download | only in npf
npf_ruleset.c revision 1.20.6.1
      1 /*	$NetBSD: npf_ruleset.c,v 1.20.6.1 2014/05/18 17:46:13 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This material is based upon work partially supported by The
      8  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * NPF ruleset module.
     34  */
     35 
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.20.6.1 2014/05/18 17:46:13 rmind Exp $");
     38 
     39 #include <sys/param.h>
     40 #include <sys/types.h>
     41 
     42 #include <sys/atomic.h>
     43 #include <sys/kmem.h>
     44 #include <sys/queue.h>
     45 #include <sys/mbuf.h>
     46 #include <sys/types.h>
     47 
     48 #include <net/bpf.h>
     49 #include <net/bpfjit.h>
     50 #include <net/pfil.h>
     51 #include <net/if.h>
     52 
     53 #include "npf_impl.h"
     54 
struct npf_ruleset {
	/*
	 * Rule lists:
	 * - rs_all: every rule in this ruleset (incl. dynamic subrules).
	 * - rs_dynamic: dynamic (i.e. named) group rules only.
	 * - rs_gc: rules unlinked but not yet freed (G/C list).
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter, incremented for each dynamic rule added. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules (flexible array member, rs_slots long). */
	npf_rule_t *		rs_rules[];
};
     75 
struct npf_rule {
	/* Attributes (NPF_RULE_* flags), interface ID and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any: type, JIT code, byte-code and length. */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	size_t			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 * Each union is overlaid: a rule is either a group or a subrule.
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID (unique within the ruleset) and the original dictionary. */
	uint64_t		r_id;
	prop_dictionary_t	r_dict;

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional), used to identify dynamic rules for removal. */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};
    119 
/* True if the attributes describe a dynamic (named) group. */
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

/* True if the attributes describe a dynamic rule, i.e. a member of a
 * dynamic group (dynamic flag set, but not a group itself). */
#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
    126 npf_ruleset_t *
    127 npf_ruleset_create(size_t slots)
    128 {
    129 	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
    130 	npf_ruleset_t *rlset;
    131 
    132 	rlset = kmem_zalloc(len, KM_SLEEP);
    133 	LIST_INIT(&rlset->rs_dynamic);
    134 	LIST_INIT(&rlset->rs_all);
    135 	LIST_INIT(&rlset->rs_gc);
    136 	rlset->rs_slots = slots;
    137 
    138 	return rlset;
    139 }
    140 
    141 static void
    142 npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
    143 {
    144 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
    145 		LIST_REMOVE(rl, r_dentry);
    146 	}
    147 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
    148 		npf_rule_t *rg = rl->r_parent;
    149 		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
    150 	}
    151 	LIST_REMOVE(rl, r_aentry);
    152 }
    153 
    154 void
    155 npf_ruleset_destroy(npf_ruleset_t *rlset)
    156 {
    157 	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
    158 	npf_rule_t *rl;
    159 
    160 	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
    161 		npf_ruleset_unlink(rlset, rl);
    162 		npf_rule_free(rl);
    163 	}
    164 	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
    165 	KASSERT(LIST_EMPTY(&rlset->rs_gc));
    166 	kmem_free(rlset, len);
    167 }
    168 
    169 /*
    170  * npf_ruleset_insert: insert the rule into the specified ruleset.
    171  */
    172 void
    173 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
    174 {
    175 	u_int n = rlset->rs_nitems;
    176 
    177 	KASSERT(n < rlset->rs_slots);
    178 
    179 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
    180 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
    181 		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
    182 	} else {
    183 		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
    184 		rl->r_attr &= ~NPF_RULE_DYNAMIC;
    185 	}
    186 
    187 	rlset->rs_rules[n] = rl;
    188 	rlset->rs_nitems++;
    189 
    190 	if (rl->r_skip_to < ++n) {
    191 		rl->r_skip_to = n;
    192 	}
    193 }
    194 
    195 static npf_rule_t *
    196 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
    197 {
    198 	npf_rule_t *rl;
    199 
    200 	KASSERT(npf_config_locked_p());
    201 
    202 	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
    203 		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
    204 		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
    205 			break;
    206 	}
    207 	return rl;
    208 }
    209 
/*
 * npf_ruleset_add: add a dynamic rule into the named dynamic group,
 * inserted into the group's subset at a position determined by the
 * rule priority.  Returns ESRCH if the group does not exist and
 * EINVAL if the rule is not a dynamic rule.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		/* Insert before the first rule of same-or-lower priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		/* Insert after the last rule of same-or-higher priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}
    266 
    267 int
    268 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
    269 {
    270 	npf_rule_t *rg, *rl;
    271 
    272 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    273 		return ESRCH;
    274 	}
    275 	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
    276 		KASSERT(rl->r_parent == rg);
    277 
    278 		/* Compare ID.  On match, remove and return. */
    279 		if (rl->r_id == id) {
    280 			npf_ruleset_unlink(rlset, rl);
    281 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    282 			return 0;
    283 		}
    284 	}
    285 	return ENOENT;
    286 }
    287 
    288 int
    289 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    290     const void *key, size_t len)
    291 {
    292 	npf_rule_t *rg, *rl;
    293 
    294 	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
    295 
    296 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    297 		return ESRCH;
    298 	}
    299 
    300 	/* Find the last in the list. */
    301 	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
    302 		KASSERT(rl->r_parent == rg);
    303 
    304 		/* Compare the key.  On match, remove and return. */
    305 		if (memcmp(rl->r_key, key, len) == 0) {
    306 			npf_ruleset_unlink(rlset, rl);
    307 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    308 			return 0;
    309 		}
    310 	}
    311 	return ENOENT;
    312 }
    313 
    314 prop_dictionary_t
    315 npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
    316 {
    317 	prop_dictionary_t rldict;
    318 	prop_array_t rules;
    319 	npf_rule_t *rg, *rl;
    320 
    321 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    322 		return NULL;
    323 	}
    324 	if ((rldict = prop_dictionary_create()) == NULL) {
    325 		return NULL;
    326 	}
    327 	if ((rules = prop_array_create()) == NULL) {
    328 		prop_object_release(rldict);
    329 		return NULL;
    330 	}
    331 
    332 	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
    333 		KASSERT(rl->r_parent == rg);
    334 		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
    335 			prop_object_release(rldict);
    336 			prop_object_release(rules);
    337 			return NULL;
    338 		}
    339 	}
    340 
    341 	if (!prop_dictionary_set(rldict, "rules", rules)) {
    342 		prop_object_release(rldict);
    343 		rldict = NULL;
    344 	}
    345 	prop_object_release(rules);
    346 	return rldict;
    347 }
    348 
    349 int
    350 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
    351 {
    352 	npf_rule_t *rg, *rl;
    353 
    354 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
    355 		return ESRCH;
    356 	}
    357 	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
    358 		KASSERT(rl->r_parent == rg);
    359 		npf_ruleset_unlink(rlset, rl);
    360 		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
    361 	}
    362 	return 0;
    363 }
    364 
    365 void
    366 npf_ruleset_gc(npf_ruleset_t *rlset)
    367 {
    368 	npf_rule_t *rl;
    369 
    370 	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
    371 		LIST_REMOVE(rl, r_aentry);
    372 		npf_rule_free(rl);
    373 	}
    374 }
    375 
/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * For each dynamic group in the new ruleset, inherit the subset of the
 * same-named group in the active ruleset, so dynamic rules survive a
 * configuration reload.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arg, *rl;

		/* Find the same-named group in the active ruleset. */
		if ((arg = npf_ruleset_lookup(arlset, rg->r_name)) == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure.  This is necessary because
		 * the rules are still active and therefore accessible for
		 * inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &arg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			/*
			 * We can safely migrate to the new all-rule list
			 * and re-set the parent rule, though.
			 */
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/* Inherit the ID counter so new dynamic rule IDs stay unique. */
	rlset->rs_idcnt = arlset->rs_idcnt;
}
    415 
    416 /*
    417  * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
    418  */
    419 npf_rule_t *
    420 npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
    421 {
    422 	npf_rule_t *rl;
    423 
    424 	/* Find a matching NAT policy in the old ruleset. */
    425 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    426 		if (npf_nat_matchpolicy(rl->r_natp, mnp))
    427 			break;
    428 	}
    429 	return rl;
    430 }
    431 
    432 npf_rule_t *
    433 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
    434 {
    435 	npf_natpolicy_t *np;
    436 	npf_rule_t *rl;
    437 
    438 	/* Find a matching NAT policy in the old ruleset. */
    439 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    440 		/*
    441 		 * NAT policy might not yet be set during the creation of
    442 		 * the ruleset (in such case, rule is for our policy), or
    443 		 * policies might be equal due to rule exchange on reload.
    444 		 */
    445 		np = rl->r_natp;
    446 		if (np == NULL || np == mnp)
    447 			continue;
    448 		if (npf_nat_sharepm(np, mnp))
    449 			break;
    450 	}
    451 	return rl;
    452 }
    453 
    454 /*
    455  * npf_ruleset_freealg: inspect the ruleset and disassociate specified
    456  * ALG from all NAT entries using it.
    457  */
    458 void
    459 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
    460 {
    461 	npf_rule_t *rl;
    462 	npf_natpolicy_t *np;
    463 
    464 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
    465 		if ((np = rl->r_natp) != NULL) {
    466 			npf_nat_freealg(np, alg);
    467 		}
    468 	}
    469 }
    470 
    471 /*
    472  * npf_ruleset_natreload: minimum reload of NAT policies by matching
    473  * two (active and new) NAT rulesets.
    474  */
    475 void
    476 npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
    477 {
    478 	npf_natpolicy_t *np, *anp;
    479 	npf_rule_t *rl, *arl;
    480 
    481 	KASSERT(npf_config_locked_p());
    482 
    483 	/* Scan a new NAT ruleset against NAT policies in old ruleset. */
    484 	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
    485 		np = rl->r_natp;
    486 		arl = npf_ruleset_matchnat(arlset, np);
    487 		if (arl == NULL) {
    488 			continue;
    489 		}
    490 		/* On match - we exchange NAT policies. */
    491 		anp = arl->r_natp;
    492 		rl->r_natp = anp;
    493 		arl->r_natp = np;
    494 		/* Update other NAT policies to share portmap. */
    495 		(void)npf_ruleset_sharepm(nrlset, anp);
    496 	}
    497 }
    498 
/*
 * npf_rule_alloc: allocate a rule and initialise it from the given
 * property dictionary.  Returns NULL if the interface cannot be
 * registered or the supplied key is too long.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure (zeroed, so all fields default to 0). */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional) */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);

	if (prop_dictionary_get_cstring_nocopy(rldict, "interface", &rname)) {
		/* Registration failure is fatal for this rule. */
		if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	/* NOTE(review): assumes prop_data_data_nocopy() tolerates a NULL
	 * or non-data object and returns NULL then -- verify vs proplib. */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	/* Dynamic rules keep a copy of their dictionary for listing. */
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}
    555 
    556 /*
    557  * npf_rule_setcode: assign filter code to the rule.
    558  *
    559  * => The code must be validated by the caller.
    560  * => JIT compilation may be performed here.
    561  */
    562 void
    563 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
    564 {
    565 	KASSERT(type == NPF_CODE_BPF);
    566 
    567 	if ((rl->r_jcode = npf_bpf_compile(code, size)) == NULL) {
    568 		rl->r_code = code;
    569 		rl->r_clen = size;
    570 	} else {
    571 		rl->r_code = NULL;
    572 	}
    573 	rl->r_type = type;
    574 }
    575 
    576 /*
    577  * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
    578  */
    579 void
    580 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
    581 {
    582 	npf_rproc_acquire(rp);
    583 	rl->r_rproc = rp;
    584 }
    585 
    586 /*
    587  * npf_rule_free: free the specified rule.
    588  */
    589 void
    590 npf_rule_free(npf_rule_t *rl)
    591 {
    592 	npf_natpolicy_t *np = rl->r_natp;
    593 	npf_rproc_t *rp = rl->r_rproc;
    594 
    595 	if (np) {
    596 		/* Free NAT policy. */
    597 		npf_nat_freepolicy(np);
    598 	}
    599 	if (rp) {
    600 		/* Release rule procedure. */
    601 		npf_rproc_release(rp);
    602 	}
    603 	if (rl->r_code) {
    604 		/* Free byte-code. */
    605 		kmem_free(rl->r_code, rl->r_clen);
    606 	}
    607 	if (rl->r_jcode) {
    608 		/* Free JIT code. */
    609 		bpf_jit_freecode(rl->r_jcode);
    610 	}
    611 	if (rl->r_dict) {
    612 		/* Destroy the dictionary. */
    613 		prop_object_release(rl->r_dict);
    614 	}
    615 	kmem_free(rl, sizeof(npf_rule_t));
    616 }
    617 
    618 /*
    619  * npf_rule_getid: return the unique ID of a rule.
    620  * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
    621  * npf_rule_getnat: get NAT policy assigned to the rule.
    622  */
    623 
    624 uint64_t
    625 npf_rule_getid(const npf_rule_t *rl)
    626 {
    627 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
    628 	return rl->r_id;
    629 }
    630 
    631 npf_rproc_t *
    632 npf_rule_getrproc(const npf_rule_t *rl)
    633 {
    634 	npf_rproc_t *rp = rl->r_rproc;
    635 
    636 	if (rp) {
    637 		npf_rproc_acquire(rp);
    638 	}
    639 	return rp;
    640 }
    641 
    642 npf_natpolicy_t *
    643 npf_rule_getnat(const npf_rule_t *rl)
    644 {
    645 	return rl->r_natp;
    646 }
    647 
    648 /*
    649  * npf_rule_setnat: assign NAT policy to the rule and insert into the
    650  * NAT policy list in the ruleset.
    651  */
    652 void
    653 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
    654 {
    655 
    656 	KASSERT(rl->r_natp == NULL);
    657 	rl->r_natp = np;
    658 }
    659 
/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if rule matches and false otherwise.
 */
static inline bool
npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	/* Match the interface (0 means "any interface"). */
	if (rl->r_ifid && rl->r_ifid != ifid) {
		return false;
	}

	/* Match the direction, unless the rule covers both directions. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Any code?  Both pointers NULL means no filter code attached. */
	if (rl->r_jcode == rl->r_code) {
		KASSERT(rl->r_jcode == NULL);
		KASSERT(rl->r_code == NULL);
		return true;
	}
	/* Run the BPF filter (JIT or byte-code interpreted). */
	KASSERT(rl->r_type == NPF_CODE_BPF);
	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
}
    688 
    689 /*
    690  * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
    691  * This is only for the dynamic rules.  Subrules cannot have nested rules.
    692  */
    693 static npf_rule_t *
    694 npf_rule_reinspect(const npf_rule_t *drl, bpf_args_t *bc_args,
    695     const int di_mask, const u_int ifid)
    696 {
    697 	npf_rule_t *final_rl = NULL, *rl;
    698 
    699 	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));
    700 
    701 	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
    702 		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
    703 			continue;
    704 		}
    705 		if (rl->r_attr & NPF_RULE_FINAL) {
    706 			return rl;
    707 		}
    708 		final_rl = rl;
    709 	}
    710 	return final_rl;
    711 }
    712 
/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 *
 * => Caller is responsible for nbuf chain protection.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	/* Prepare the BPF arguments once for all rules. */
	memset(&bc_args, 0, sizeof(bpf_args_t));
	bc_args.pkt = nbuf_head_mbuf(nbuf);
	bc_args.wirelen = m_length(bc_args.pkt);
	bc_args.arg = npc;

	/* Exactly one direction must be set. */
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		KASSERT(n < skip_to);

		/* Group is a barrier: return a matching if found any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule; on mismatch, skip ahead. */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
    786 
    787 /*
    788  * npf_rule_conclude: return decision and the flags for conclusion.
    789  *
    790  * => Returns ENETUNREACH if "block" and 0 if "pass".
    791  */
    792 int
    793 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
    794 {
    795 	/* If not passing - drop the packet. */
    796 	*retfl = rl->r_attr;
    797 	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
    798 }
    799