npf_ruleset.c revision 1.19 1 /* $NetBSD: npf_ruleset.c,v 1.19 2013/02/16 21:11:13 rmind Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF ruleset module.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.19 2013/02/16 21:11:13 rmind Exp $");
38
39 #include <sys/param.h>
40 #include <sys/types.h>
41
42 #include <sys/kmem.h>
43 #include <sys/queue.h>
44 #include <sys/mbuf.h>
45 #include <sys/types.h>
46
47 #include <net/bpf.h>
48 #include <net/pfil.h>
49 #include <net/if.h>
50
51 #include "npf_ncode.h"
52 #include "npf_impl.h"
53
struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;		/* every rule in this set */
	LIST_HEAD(, npf_rule)	rs_dynamic;	/* dynamic (named) groups only */
	LIST_HEAD(, npf_rule)	rs_gc;		/* unlinked rules awaiting npf_ruleset_gc() */

	/* Unique ID counter.  Incremented for each dynamic rule added. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;	/* capacity of rs_rules[] */
	u_int			rs_nitems;	/* rules currently in rs_rules[] */

	/* Array of ordered rules (flexible array member, sized at creation). */
	npf_rule_t *		rs_rules[];
};
74
struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;		/* NPF_RULE_* flag bits */
	u_int			r_ifid;		/* interface index; 0 = any */
	u_int			r_skip_to;	/* rs_rules[] index to jump to on mismatch */

	/* Code to process, if any. */
	int			r_type;		/* NPF_CODE_NC or NPF_CODE_BPF */
	void *			r_code;		/* filter byte-code; NULL = always match */
	size_t			r_clen;		/* byte length of r_code */

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 *
	 * The unions overlay the two roles: a rule is either a dynamic
	 * group (uses r_subset + r_dentry) or a dynamic rule within a
	 * group (uses r_entry + r_parent), never both.
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule)	r_subset;
		TAILQ_ENTRY(npf_rule)		r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)		r_dentry;
		npf_rule_t *			r_parent;
	} /* C11 */;

	/* Rule ID (unique within the ruleset) and the original dictionary. */
	uint64_t		r_id;
	prop_dictionary_t	r_dict;		/* kept only for dynamic rules */

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional), used to identify dynamic rules for removal. */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};
117
/*
 * A rule is a dynamic group when all bits of NPF_DYNAMIC_GROUP are set,
 * and a dynamic rule (member of such a group) when only the dynamic bit
 * of that mask is set — presumably NPF_DYNAMIC_GROUP is the combination
 * of the group and dynamic attribute bits (defined in npf headers).
 */
#define NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
123
124 npf_ruleset_t *
125 npf_ruleset_create(size_t slots)
126 {
127 size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
128 npf_ruleset_t *rlset;
129
130 rlset = kmem_zalloc(len, KM_SLEEP);
131 LIST_INIT(&rlset->rs_dynamic);
132 LIST_INIT(&rlset->rs_all);
133 LIST_INIT(&rlset->rs_gc);
134 rlset->rs_slots = slots;
135
136 return rlset;
137 }
138
/*
 * npf_ruleset_unlink: detach the rule from every ruleset list it is on.
 *
 * A dynamic group is removed from the dynamic-group list; a dynamic rule
 * is removed from its parent group's subset queue.  In all cases the rule
 * leaves the all-rules list.  The rule itself is not freed.
 */
static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		/* r_parent is valid only for dynamic rules (union overlay). */
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}
151
152 void
153 npf_ruleset_destroy(npf_ruleset_t *rlset)
154 {
155 size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
156 npf_rule_t *rl;
157
158 while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
159 npf_ruleset_unlink(rlset, rl);
160 npf_rule_free(rl);
161 }
162 KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
163 KASSERT(LIST_EMPTY(&rlset->rs_gc));
164 kmem_free(rlset, len);
165 }
166
/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 *
 * The rule is appended to the ordered rs_rules[] array, added to the
 * all-rules list and, if it is a dynamic group, to the dynamic list.
 * The caller must ensure there is a free slot.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;

	/*
	 * Ensure skip-to points at least one past this rule's own slot,
	 * so the inspection loop always makes forward progress.
	 */
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = n;
	}
}
189
190 static npf_rule_t *
191 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
192 {
193 npf_rule_t *rl;
194
195 KASSERT(npf_config_locked_p());
196
197 LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
198 KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
199 if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
200 break;
201 }
202 return rl;
203 }
204
/*
 * npf_ruleset_add: insert a dynamic rule into the named dynamic group,
 * positioned according to its priority.
 *
 * => Returns ESRCH if the group does not exist, EINVAL if the rule is
 *    not a dynamic rule, 0 on success.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		/* Insert before the first rule of equal priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		/* Insert after the last rule of equal priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}
261
262 int
263 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
264 {
265 npf_rule_t *rg, *rl;
266
267 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
268 return ESRCH;
269 }
270 TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
271 /* Compare ID. On match, remove and return. */
272 if (rl->r_id == id) {
273 npf_ruleset_unlink(rlset, rl);
274 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
275 return 0;
276 }
277 }
278 return ENOENT;
279 }
280
281 int
282 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
283 const void *key, size_t len)
284 {
285 npf_rule_t *rg, *rl;
286
287 KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
288
289 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
290 return ESRCH;
291 }
292
293 /* Find the last in the list. */
294 TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
295 /* Compare the key. On match, remove and return. */
296 if (memcmp(rl->r_key, key, len) == 0) {
297 npf_ruleset_unlink(rlset, rl);
298 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
299 return 0;
300 }
301 }
302 return ENOENT;
303 }
304
/*
 * npf_ruleset_list: serialise the named dynamic group as a dictionary
 * containing a "rules" array of the subrules' original dictionaries.
 *
 * => Returns a new dictionary (caller releases), or NULL on lookup or
 *    allocation failure.
 */
prop_dictionary_t
npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
{
	prop_dictionary_t rldict;
	prop_array_t rules;
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rldict = prop_dictionary_create()) == NULL) {
		return NULL;
	}
	if ((rules = prop_array_create()) == NULL) {
		prop_object_release(rldict);
		return NULL;
	}

	/* Rules without a saved dictionary are silently skipped. */
	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
			prop_object_release(rldict);
			prop_object_release(rules);
			return NULL;
		}
	}

	if (!prop_dictionary_set(rldict, "rules", rules)) {
		prop_object_release(rldict);
		rldict = NULL;
	}
	/* The dictionary (if set) holds its own reference to the array. */
	prop_object_release(rules);
	return rldict;
}
338
339 int
340 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
341 {
342 npf_rule_t *rg, *rl;
343
344 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
345 return ESRCH;
346 }
347 while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
348 npf_ruleset_unlink(rlset, rl);
349 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
350 }
351 return 0;
352 }
353
354 void
355 npf_ruleset_gc(npf_ruleset_t *rlset)
356 {
357 npf_rule_t *rl;
358
359 while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
360 LIST_REMOVE(rl, r_aentry);
361 npf_rule_free(rl);
362 }
363 }
364
/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * For every dynamic group in the new ruleset that also exists (by name)
 * in the active ruleset, take over the active group's subrules.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arg, *rl;

		if ((arg = npf_ruleset_lookup(arlset, rg->r_name)) == NULL) {
			/* Group not present in the active set - nothing to inherit. */
			continue;
		}

		/*
		 * Copy the list-head structure and move the rules from the
		 * old ruleset to the new by reinserting to a new all-rules
		 * list and resetting the parent rule.  Note that the rules
		 * are still active and therefore accessible for inspection
		 * via the old ruleset.
		 */
		memcpy(&rg->r_subset, &arg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/* Inherit the ID counter so new dynamic rule IDs stay unique. */
	rlset->rs_idcnt = arlset->rs_idcnt;
}
402
403 /*
404 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
405 */
406 npf_rule_t *
407 npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
408 {
409 npf_rule_t *rl;
410
411 /* Find a matching NAT policy in the old ruleset. */
412 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
413 if (npf_nat_matchpolicy(rl->r_natp, mnp))
414 break;
415 }
416 return rl;
417 }
418
419 npf_rule_t *
420 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
421 {
422 npf_natpolicy_t *np;
423 npf_rule_t *rl;
424
425 /* Find a matching NAT policy in the old ruleset. */
426 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
427 /*
428 * NAT policy might not yet be set during the creation of
429 * the ruleset (in such case, rule is for our policy), or
430 * policies might be equal due to rule exchange on reload.
431 */
432 np = rl->r_natp;
433 if (np == NULL || np == mnp)
434 continue;
435 if (npf_nat_sharepm(np, mnp))
436 break;
437 }
438 return rl;
439 }
440
441 /*
442 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
443 * ALG from all NAT entries using it.
444 */
445 void
446 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
447 {
448 npf_rule_t *rl;
449 npf_natpolicy_t *np;
450
451 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
452 if ((np = rl->r_natp) != NULL) {
453 npf_nat_freealg(np, alg);
454 }
455 }
456 }
457
/*
 * npf_ruleset_natreload: minimum reload of NAT policies by matching
 * two (active and new) NAT rulesets.
 *
 * Matching policies are exchanged between the rulesets so that active
 * NAT state keeps referencing live policy objects across the reload.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	/* Scan a new NAT ruleset against NAT policies in old ruleset. */
	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match - we exchange NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update other NAT policies to share portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}
485
/*
 * npf_rule_alloc: allocate a rule and initialise it from the given
 * property dictionary (name, attributes, priority, interface, skip-to
 * and optional key).  Note: the filter code is attached separately via
 * npf_rule_setcode().
 *
 * => Returns the new rule, or NULL if the supplied key is too long.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional) */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
	prop_dictionary_get_uint32(rldict, "interface", &rl->r_ifid);

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional).  prop_data_data_nocopy() yields NULL if absent. */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			/* Oversized key: reject the whole rule. */
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	/* Dynamic rules keep a copy of the dictionary for listing. */
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}
534
535 /*
536 * npf_rule_setcode: assign filter code to the rule.
537 *
538 * => The code should be validated by the caller.
539 */
540 void
541 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
542 {
543 rl->r_type = type;
544 rl->r_code = code;
545 rl->r_clen = size;
546 }
547
548 /*
549 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
550 */
551 void
552 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
553 {
554 npf_rproc_acquire(rp);
555 rl->r_rproc = rp;
556 }
557
558 /*
559 * npf_rule_free: free the specified rule.
560 */
561 void
562 npf_rule_free(npf_rule_t *rl)
563 {
564 npf_natpolicy_t *np = rl->r_natp;
565 npf_rproc_t *rp = rl->r_rproc;
566
567 if (np) {
568 /* Free NAT policy. */
569 npf_nat_freepolicy(np);
570 }
571 if (rp) {
572 /* Release rule procedure. */
573 npf_rproc_release(rp);
574 }
575 if (rl->r_code) {
576 /* Free n-code. */
577 kmem_free(rl->r_code, rl->r_clen);
578 }
579 if (rl->r_dict) {
580 /* Destroy the dictionary. */
581 prop_object_release(rl->r_dict);
582 }
583 kmem_free(rl, sizeof(npf_rule_t));
584 }
585
586 /*
587 * npf_rule_getid: return the unique ID of a rule.
588 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
589 * npf_rule_getnat: get NAT policy assigned to the rule.
590 */
591
592 uint64_t
593 npf_rule_getid(const npf_rule_t *rl)
594 {
595 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
596 return rl->r_id;
597 }
598
599 npf_rproc_t *
600 npf_rule_getrproc(npf_rule_t *rl)
601 {
602 npf_rproc_t *rp = rl->r_rproc;
603
604 if (rp) {
605 npf_rproc_acquire(rp);
606 }
607 return rp;
608 }
609
610 npf_natpolicy_t *
611 npf_rule_getnat(const npf_rule_t *rl)
612 {
613 return rl->r_natp;
614 }
615
616 /*
617 * npf_rule_setnat: assign NAT policy to the rule and insert into the
618 * NAT policy list in the ruleset.
619 */
620 void
621 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
622 {
623
624 KASSERT(rl->r_natp == NULL);
625 rl->r_natp = np;
626 }
627
/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if rule matches, false otherwise.
 */
static inline bool
npf_rule_inspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *rl,
    const int di_mask, const int layer)
{
	const ifnet_t *ifp = nbuf->nb_ifp;
	const void *code;

	/* Match the interface (r_ifid of 0 means "any interface"). */
	if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
		return false;
	}

	/* Match the direction (both bits set means "any direction"). */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Execute the code, if any.  No code means the rule matches. */
	if ((code = rl->r_code) == NULL) {
		return true;
	}

	switch (rl->r_type) {
	case NPF_CODE_NC:
		/* n-code processor: zero return value means a match. */
		return npf_ncode_process(npc, code, nbuf, layer) == 0;
	case NPF_CODE_BPF: {
		/* BPF filter: non-zero return value means a match. */
		struct mbuf *m = nbuf_head_mbuf(nbuf);
		size_t pktlen = m_length(m);
		return bpf_filter(code, (unsigned char *)m, pktlen, 0) != 0;
	}
	default:
		KASSERT(false);
	}
	return false;
}
668
669 /*
670 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
671 * This is only for the dynamic rules. Subrules cannot have nested rules.
672 */
673 static npf_rule_t *
674 npf_rule_reinspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *drl,
675 const int di_mask, const int layer)
676 {
677 npf_rule_t *final_rl = NULL, *rl;
678
679 KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));
680
681 TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
682 if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
683 continue;
684 }
685 if (rl->r_attr & NPF_RULE_FINAL) {
686 return rl;
687 }
688 final_rl = rl;
689 }
690 return final_rl;
691 }
692
/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run n-code processor of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 *
 * => Caller is responsible for nbuf chain protection.
 * => Returns the last matching rule (or the "final" one), or NULL.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	npf_rule_t *final_rl = NULL;
	u_int n = 0;

	/* Exactly one of PFIL_IN/PFIL_OUT must be set. */
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		/* skip_to > n guarantees forward progress (set on insert). */
		KASSERT(n < skip_to);

		/* Group is a barrier: return a matching if found any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule; on mismatch, jump ahead. */
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(npc, nbuf, rl, di_mask, layer);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
759
760 /*
761 * npf_rule_conclude: return decision and the flags for conclusion.
762 *
763 * => Returns ENETUNREACH if "block" and 0 if "pass".
764 */
765 int
766 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
767 {
768 /* If not passing - drop the packet. */
769 *retfl = rl->r_attr;
770 return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
771 }
772
773 #if defined(DDB) || defined(_NPF_TESTING)
774
775 void
776 npf_rulenc_dump(const npf_rule_t *rl)
777 {
778 const uint32_t *op = rl->r_code;
779 size_t n = rl->r_clen;
780
781 while (n) {
782 printf("\t> |0x%02x|\n", (uint32_t)*op);
783 op++;
784 n -= sizeof(*op);
785 }
786 printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
787 }
788
789 #endif
790