/*	$NetBSD: npf_ruleset.c,v 1.24 2013/09/19 01:04:46 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.24 2013/09/19 01:04:46 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
#include <net/bpfjit.h>
#include <net/pfil.h>
#include <net/if.h>

#include "npf_ncode.h"
#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	bpfjit_function_t	r_jcode;
	void *			r_code;
	size_t			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID and the original dictionary. */
	uint64_t		r_id;
	prop_dictionary_t	r_dict;

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional). */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};

#define	NPF_DYNAMIC_GROUP_P(attr)	\
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr)	\
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
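
/*
 * Note: the predicates above imply that NPF_DYNAMIC_GROUP is the
 * combination of NPF_RULE_GROUP and NPF_RULE_DYNAMIC, i.e. a dynamic
 * group carries both bits, while a dynamic rule carries NPF_RULE_DYNAMIC
 * alone.  This follows from the macro definitions; the attribute bits
 * themselves are defined in npf.h.
 */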

npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	LIST_INIT(&rlset->rs_gc);
	rlset->rs_slots = slots;

	return rlset;
}
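
/*
 * npf_ruleset_unlink: unlink the rule from all the ruleset lists.
 *
 * => Does not free the rule; the caller either frees it immediately
 *    (npf_ruleset_destroy) or moves it onto the G/C list for deferred
 *    destruction (see npf_ruleset_gc).
 */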
static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	} else {
		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
		rl->r_attr &= ~NPF_RULE_DYNAMIC;
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;
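
	/*
	 * Ensure that the skip-to index always points past this rule's
	 * own slot, so that a non-matching rule cannot stall the
	 * inspection loop.
	 */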
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = n;
	}
}

static npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}
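
	/*
	 * Insert the rule according to its priority.  The two cases differ
	 * only in the tie-break among equal priorities: NPF_PRI_FIRST
	 * inserts before the rules of equal priority, while NPF_PRI_LAST
	 * (the default) inserts after them.
	 */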
	switch (priocmd) {
	case NPF_PRI_FIRST:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		KASSERT(rl->r_parent == rg);

		/* Compare the ID.  On match, remove and return. */
		if (rl->r_id == id) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rl;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Scan the subset in reverse order, i.e. find the last match. */
	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
		KASSERT(rl->r_parent == rg);

		/* Compare the key.  On match, remove and return. */
		if (memcmp(rl->r_key, key, len) == 0) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

prop_dictionary_t
npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
{
	prop_dictionary_t rldict;
	prop_array_t rules;
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rldict = prop_dictionary_create()) == NULL) {
		return NULL;
	}
	if ((rules = prop_array_create()) == NULL) {
		prop_object_release(rldict);
		return NULL;
	}

	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		KASSERT(rl->r_parent == rg);
		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
			prop_object_release(rldict);
			prop_object_release(rules);
			return NULL;
		}
	}

	if (!prop_dictionary_set(rldict, "rules", rules)) {
		prop_object_release(rldict);
		rldict = NULL;
	}
	prop_object_release(rules);
	return rldict;
}

int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
		KASSERT(rl->r_parent == rg);
		npf_ruleset_unlink(rlset, rl);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
	}
	return 0;
}

void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arg, *rl;

		if ((arg = npf_ruleset_lookup(arlset, rg->r_name)) == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure.  This is necessary because
		 * the rules are still active and therefore accessible for
		 * inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &arg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			/*
			 * We can safely migrate to the new all-rule list
			 * and re-set the parent rule, though.
			 */
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/* Inherit the ID counter. */
	rlset->rs_idcnt = arlset->rs_idcnt;
}

/*
 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		/*
		 * NAT policy might not yet be set during the creation of
		 * the ruleset (in which case the rule is for our policy),
		 * or policies might be equal due to rule exchange on reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate the specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies, performed by
 * matching the new NAT ruleset against the active one.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	/* Scan the new NAT ruleset against the NAT policies in the old one. */
	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match - exchange the NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update other NAT policies to share the portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}

/*
 * npf_rule_alloc: allocate a rule and initialise it from the dictionary.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
	prop_dictionary_get_uint32(rldict, "interface", &rl->r_ifid);

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}
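
	/*
	 * Dynamic rules keep a copy of the original dictionary, so that
	 * they can later be exported by npf_ruleset_list().
	 */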
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}

/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
#if 0
	/* Perform BPF JIT if possible. */
	if (type == NPF_CODE_BPF && (membar_consumer(),
	    bpfjit_module_ops.bj_generate_code != NULL)) {
		KASSERT(rl->r_jcode == NULL);
		rl->r_jcode = bpfjit_module_ops.bj_generate_code(code, size);
		rl->r_code = NULL;
	}
#endif
}

/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		KASSERT(bpfjit_module_ops.bj_free_code != NULL);
		bpfjit_module_ops.bj_free_code(rl->r_jcode);
	}
	if (rl->r_dict) {
		/* Destroy the dictionary. */
		prop_object_release(rl->r_dict);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}

npf_rproc_t *
npf_rule_getrproc(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if the rule matches, false otherwise.
 */
static inline bool
npf_rule_inspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *rl,
    const int di_mask, const int layer)
{
	const ifnet_t *ifp = nbuf->nb_ifp;

	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Any code to process?  If not, it is an unconditional match. */
	if (rl->r_code == NULL && rl->r_jcode == NULL) {
		return true;
	}

	switch (rl->r_type) {
	case NPF_CODE_BPF:
		return npf_bpf_filter(npc, nbuf, rl->r_code, rl->r_jcode) != 0;
	case NPF_CODE_NC:
		return npf_ncode_process(npc, rl->r_code, nbuf, layer) == 0;
	default:
		KASSERT(false);
	}
	return false;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
 * This is only for the dynamic rules.  Subrules cannot have nested rules.
 */
static npf_rule_t *
npf_rule_reinspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *drl,
    const int di_mask, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));

	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the filter code of each rule
 * against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
 *
 * => Caller is responsible for the nbuf chain protection.
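 * => If a rule does not match, the scan continues from the rule's
 *    r_skip_to index, which always points past the rule itself
 *    (enforced by npf_ruleset_insert).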
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	npf_rule_t *final_rl = NULL;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		KASSERT(n < skip_to);

		/* A group is a barrier: return the matching rule, if any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(npc, nbuf, rl, di_mask, layer);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* The matching rule was set above; stop if it is "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, int *retfl)
{
	/* If not passing - drop the packet. */
	*retfl = rl->r_attr;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(const npf_rule_t *rl)
{
	const uint32_t *op = rl->r_code;
	size_t n = rl->r_clen;
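
	/* Dump the n-code as a stream of 32-bit words (r_clen is in bytes). */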
	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif