/*	$NetBSD: npf_ruleset.c,v 1.27 2013/11/15 00:12:45 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.27 2013/11/15 00:12:45 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
#include <net/bpfjit.h>
#include <net/pfil.h>
#include <net/if.h>

#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	size_t			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID and the original dictionary. */
	uint64_t		r_id;
	prop_dictionary_t	r_dict;

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional). */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};

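/*
 * Note: assuming NPF_DYNAMIC_GROUP in npf.h is the combination of the
 * NPF_RULE_GROUP and NPF_RULE_DYNAMIC bits, the first predicate below
 * matches only when both bits are set (a dynamic group), while the
 * second matches the dynamic bit without the group bit (a dynamic rule
 * within such a group).
 */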
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)

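/*
 * npf_ruleset_create: allocate and initialise a ruleset with the given
 * number of array slots.  A usage sketch (hypothetical caller, for
 * illustration only):
 *
 *	npf_ruleset_t *rs = npf_ruleset_create(maxrules);
 *	npf_ruleset_insert(rs, rl);
 *	...
 *	npf_ruleset_destroy(rs);
 *
 * Each insert consumes one slot; destroy also frees the inserted rules.
 */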
npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	LIST_INIT(&rlset->rs_gc);
	rlset->rs_slots = slots;

	return rlset;
}

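/*
 * npf_ruleset_unlink: remove the rule from the ruleset lists and,
 * if it is a dynamic rule, from its parent group.
 */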
static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}

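/*
 * npf_ruleset_destroy: unlink and free all rules, then the ruleset itself.
 */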
void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	} else {
		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
		rl->r_attr &= ~NPF_RULE_DYNAMIC;
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;

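	/* Make sure the skip-to index points at least past this rule. */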
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = n;
	}
}

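/*
 * npf_ruleset_lookup: find a dynamic rule group by its name.
 */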
static npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

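/*
 * npf_ruleset_add: insert a dynamic rule into the named dynamic group,
 * ordering it by the rule priority.
 *
 * Illustration (hypothetical values): with a subset ordered by the
 * priorities 1, 2, 2, 5, adding a rule of priority 2 takes the default
 * path and lands after the existing priority-2 entries, since the scan
 * stops at the first strictly greater priority.  The NPF_PRI_FIRST and
 * NPF_PRI_LAST commands are negative values: the rule priority is reset
 * to zero and the rule gravitates to the head or the tail of the
 * zero-priority entries, respectively.
 */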
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

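/*
 * npf_ruleset_remove: remove the dynamic rule with the given ID from the
 * named dynamic group and move it onto the G/C list.
 */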
int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		KASSERT(rl->r_parent == rg);

		/* Compare ID.  On match, remove and return. */
		if (rl->r_id == id) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

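/*
 * npf_ruleset_remkey: scan the named dynamic group backwards and remove
 * the last rule matching the key, moving it onto the G/C list.
 */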
int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rl;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Find the last in the list. */
	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
		KASSERT(rl->r_parent == rg);

		/* Compare the key.  On match, remove and return. */
		if (memcmp(rl->r_key, key, len) == 0) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

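/*
 * npf_ruleset_list: serialise the rules of the named dynamic group into
 * a "rules" array within a newly created dictionary.
 */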
prop_dictionary_t
npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
{
	prop_dictionary_t rldict;
	prop_array_t rules;
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rldict = prop_dictionary_create()) == NULL) {
		return NULL;
	}
	if ((rules = prop_array_create()) == NULL) {
		prop_object_release(rldict);
		return NULL;
	}

	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		KASSERT(rl->r_parent == rg);
		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
			prop_object_release(rldict);
			prop_object_release(rules);
			return NULL;
		}
	}

	if (!prop_dictionary_set(rldict, "rules", rules)) {
		prop_object_release(rldict);
		rldict = NULL;
	}
	prop_object_release(rules);
	return rldict;
}

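/*
 * npf_ruleset_flush: unlink all rules of the named dynamic group and
 * move them onto the G/C list.
 */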
int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
		KASSERT(rl->r_parent == rg);
		npf_ruleset_unlink(rlset, rl);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
	}
	return 0;
}

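/*
 * npf_ruleset_gc: destroy all rules on the G/C list.
 */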
void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arg, *rl;

		if ((arg = npf_ruleset_lookup(arlset, rg->r_name)) == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure.  This is necessary because
		 * the rules are still active and therefore accessible for
		 * inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &arg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			/*
			 * We can safely migrate to the new all-rule list
			 * and re-set the parent rule, though.
			 */
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/* Inherit the ID counter. */
	rlset->rs_idcnt = arlset->rs_idcnt;
}

/*
 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

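/*
 * npf_ruleset_sharepm: let a matching NAT policy in the ruleset share
 * its portmap with the given policy.
 */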
npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		/*
		 * The NAT policy might not yet be set during the creation
		 * of the ruleset (in that case, the rule is for our policy),
		 * or the policies might be equal due to the rule exchange
		 * on reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate the specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies by matching
 * two (active and new) NAT rulesets.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	/* Scan the new NAT ruleset against NAT policies in the old ruleset. */
	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match - exchange the NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update other NAT policies to share the portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}
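
/*
 * Note: the exchange above (rather than a copy) presumably keeps the
 * policies of the still-active ruleset alive and intact while the new
 * rules take them over, the fresh policies being destroyed along with
 * the old ruleset.  This is our reading of the intent, not a stated
 * contract.
 */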

/*
 * npf_rule_alloc: allocate a rule and initialise it.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);

	if (prop_dictionary_get_cstring_nocopy(rldict, "interface", &rname)) {
		if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}
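
/*
 * For illustration: the dictionary consumed above may carry the keys
 * "name", "attributes", "priority", "interface", "skip-to" and "key",
 * all of them optional.  A hypothetical producer could be sketched as:
 *
 *	prop_dictionary_t rldict = prop_dictionary_create();
 *	prop_dictionary_set_uint32(rldict, "attributes", attr);
 *	prop_dictionary_set_int32(rldict, "priority", pri);
 *	rl = npf_rule_alloc(rldict);
 */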

/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	KASSERT(type == NPF_CODE_BPF);
	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
#if 0
	/* Perform BPF JIT if possible. */
	if (membar_consumer(), bpfjit_module_ops.bj_generate_code != NULL) {
		KASSERT(rl->r_jcode == NULL);
		rl->r_jcode = bpfjit_module_ops.bj_generate_code(code, size);
		rl->r_code = NULL;
	}
#endif
}
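
/*
 * Note: the JIT path above is currently compiled out (#if 0).  The
 * membar_consumer() presumably pairs with a producer barrier on bpfjit
 * module load, so that a non-NULL bj_generate_code is observed only
 * after the module is fully initialised; an assumption on our part
 * rather than a documented contract.
 */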

/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		KASSERT(bpfjit_module_ops.bj_free_code != NULL);
		bpfjit_module_ops.bj_free_code(rl->r_jcode);
	}
	if (rl->r_dict) {
		/* Destroy the dictionary. */
		prop_object_release(rl->r_dict);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}

npf_rproc_t *
npf_rule_getrproc(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign a NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if the rule matches, false otherwise.
 */
static inline bool
npf_rule_inspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *rl,
    const int di_mask, const int layer)
{
	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != nbuf->nb_ifid) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/*
	 * Any code?  At most one of r_jcode and r_code is set, so if they
	 * are equal, both are NULL and the rule matches unconditionally.
	 */
	if (rl->r_jcode == rl->r_code) {
		KASSERT(rl->r_jcode == NULL);
		KASSERT(rl->r_code == NULL);
		return true;
	}
	KASSERT(rl->r_type == NPF_CODE_BPF);
	return npf_bpf_filter(npc, nbuf, rl->r_code, rl->r_jcode) != 0;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
 * This is only for the dynamic rules.  Subrules cannot have nested rules.
 */
static npf_rule_t *
npf_rule_reinspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *drl,
    const int di_mask, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));

	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
 *
 * => Caller is responsible for the nbuf chain protection.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	npf_rule_t *final_rl = NULL;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		KASSERT(n < skip_to);

		/* A group is a barrier: return the matching rule, if any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic group, re-inspect the
			 * subrules.  If any of them match, it is final.
			 */
			rl = npf_rule_reinspect(npc, nbuf, rl, di_mask, layer);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching rules.
			 */
			final_rl = rl;
		}

		/* Match found - check whether the rule is "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
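
/*
 * Traversal sketch (hypothetical layout, for illustration): rs_rules[]
 * typically holds a group followed by its subordinate rules, with the
 * skip-to index pointing past the group's block, e.g.:
 *
 *	[0] group A		skip_to = 3
 *	[1]   rule A1		skip_to = 2
 *	[2]   rule A2		skip_to = 3
 *	[3] group B		skip_to = ...
 *
 * so a non-matching rule (or group) jumps straight to its skip-to index,
 * while a matching group falls through into its subordinate rules.
 */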

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, int *retfl)
{
	/* If not passing - drop the packet. */
	*retfl = rl->r_attr;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}