/*	$NetBSD: npf_ruleset.c,v 1.18 2013/02/10 23:47:37 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.18 2013/02/10 23:47:37 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
#include <net/pfil.h>
#include <net/if.h>

#include "npf_ncode.h"
#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	void *			r_code;
	size_t			r_clen;

	/* NAT policy (optional) and rule procedure. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Dictionary. */
	prop_dictionary_t	r_dict;

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional). */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};

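/*
 * Note on the attributes: a dynamic group has both the NPF_RULE_GROUP
 * and NPF_RULE_DYNAMIC bits set, while a dynamic rule within such a
 * group has only NPF_RULE_DYNAMIC set; hence the distinct tests below.
 */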
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

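/*
 * npf_ruleset_create: allocate and initialise a ruleset providing the
 * given number of slots for ordered rules.
 */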
npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	rlset->rs_slots = slots;
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	return rlset;
}

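/*
 * npf_ruleset_unlink: remove the rule from the all-rules list and,
 * if applicable, from the dynamic group list or its parent's subset.
 */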
static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if ((rl->r_attr & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC) {
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}

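/*
 * npf_ruleset_destroy: destroy all rules in the ruleset and free it.
 */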
void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;

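	/*
	 * Ensure the skip-to slot points past this rule; an unset (zero)
	 * value thus defaults to the next slot.
	 */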
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = n;
	}
}

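/*
 * npf_ruleset_lookup: find a dynamic rule group by its name.
 */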
static npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

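/*
 * npf_ruleset_add: insert a dynamic rule into the named dynamic group,
 * keeping the subset ordered by rule priority.
 */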
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ENOENT;
	}

	/* Dynamic rule. */
	rl->r_attr |= NPF_RULE_DYNAMIC;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

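/*
 * npf_ruleset_remove: unlink the dynamic rule with the given ID from
 * the named group and move it onto the G/C list.
 */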
int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uintptr_t id)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ENOENT;
	}
	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		/* Compare ID.  On match, remove and return. */
		if ((uintptr_t)rl == id) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

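/*
 * npf_ruleset_remkey: unlink the last dynamic rule with the given key
 * from the named group and move it onto the G/C list.
 */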
int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rl;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ENOENT;
	}

	/* Find the last rule in the list with a matching key. */
	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
		/* Compare the key.  On match, remove and return. */
		if (memcmp(rl->r_key, key, len) == 0) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

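/*
 * npf_ruleset_list: return a dictionary listing the rules of the named
 * dynamic group.
 */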
prop_dictionary_t
npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
{
	prop_dictionary_t rldict;
	prop_array_t rules;
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rldict = prop_dictionary_create()) == NULL) {
		return NULL;
	}
	if ((rules = prop_array_create()) == NULL) {
		prop_object_release(rldict);
		return NULL;
	}

	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
			prop_object_release(rules);
			prop_object_release(rldict);
			return NULL;
		}
	}
	if (!prop_dictionary_set(rldict, "rules", rules)) {
		prop_object_release(rldict);
		rldict = NULL;
	}
	prop_object_release(rules);
	return rldict;
}

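/*
 * npf_ruleset_flush: unlink all rules of the named dynamic group and
 * move them onto the G/C list.
 */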
int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ENOENT;
	}
	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
	}
	return 0;
}

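/*
 * npf_ruleset_gc: destroy the rules which were moved onto the G/C list.
 */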
void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rl;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arl, *it;

		if ((arl = npf_ruleset_lookup(arlset, rl->r_name)) == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure and move the rules from the
		 * old ruleset to the new by reinserting them into the new
		 * all-rules list.  Note that the rules are still active and
		 * therefore accessible for inspection via the old ruleset.
		 */
		memcpy(&rl->r_subset, &arl->r_subset, sizeof(rl->r_subset));
		TAILQ_FOREACH(it, &rl->r_subset, r_entry) {
			LIST_REMOVE(it, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, it, r_aentry);
		}
	}
}

/*
 * npf_ruleset_matchnat: find a rule with a matching NAT policy in the
 * ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

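/*
 * npf_ruleset_sharepm: attempt to share the port map of the given NAT
 * policy with a matching policy found in the ruleset.
 */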
npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Find a NAT policy to share the portmap with. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		/*
		 * The NAT policy might not yet be set during the creation
		 * of the ruleset (in which case the rule is for our policy),
		 * or the policies might be equal due to rule exchange on
		 * reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate the specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies by matching
 * two (active and new) NAT rulesets.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	/* Scan the new NAT ruleset against NAT policies in the old ruleset. */
	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match, exchange the NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update other NAT policies to share the portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}

/*
 * npf_rule_alloc: allocate a rule and initialise it from the dictionary.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
	prop_dictionary_get_uint32(rldict, "interface", &rl->r_ifid);

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	if ((rl->r_attr & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}

/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code should be validated by the caller.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
}

/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free n-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_dict) {
		/* Destroy the dictionary. */
		prop_object_release(rl->r_dict);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

npf_rproc_t *
npf_rule_getrproc(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign the NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if the rule matches, false otherwise.
 */
static inline bool
npf_rule_inspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *rl,
    const int di_mask, const int layer)
{
	const ifnet_t *ifp = nbuf->nb_ifp;
	const void *code;

	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Execute the code, if any. */
	if ((code = rl->r_code) == NULL) {
		return true;
	}

	switch (rl->r_type) {
	case NPF_CODE_NC:
		return npf_ncode_process(npc, code, nbuf, layer) == 0;
	case NPF_CODE_BPF: {
		struct mbuf *m = nbuf_head_mbuf(nbuf);
		size_t pktlen = m_length(m);
		return bpf_filter(code, (unsigned char *)m, pktlen, 0) != 0;
	}
	default:
		KASSERT(false);
	}
	return false;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its subset.
 * This is only for dynamic rules; subrules cannot have nested rules.
 */
static npf_rule_t *
npf_rule_reinspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *drl,
    const int di_mask, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));

	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the n-code processor of each
 * rule against the packet (nbuf chain).  If a sub-ruleset is found, inspect
 * it as well.
 *
 * => Caller is responsible for nbuf chain protection.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	npf_rule_t *final_rl = NULL;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		KASSERT(n < skip_to);

		/* A group is a barrier: return the match found so far. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic group, re-inspect the
			 * subrules.  If any of them match, it is final.
			 */
			rl = npf_rule_reinspect(npc, nbuf, rl, di_mask, layer);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves do not match.
			 */
			final_rl = rl;
		}

		/* Check whether the matching rule is "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, int *retfl)
{
	/* If not passing - drop the packet. */
	*retfl = rl->r_attr;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}

#if defined(DDB) || defined(_NPF_TESTING)

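/*
 * npf_rulenc_dump: dump the rule's n-code words and its conclusion.
 */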
void
npf_rulenc_dump(const npf_rule_t *rl)
{
	const uint32_t *op = rl->r_code;
	size_t n = rl->r_clen;

	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif