npf_ruleset.c revision 1.36 1 /* $NetBSD: npf_ruleset.c,v 1.36 2014/08/10 19:09:43 rmind Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF ruleset module.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.36 2014/08/10 19:09:43 rmind Exp $");
38
39 #include <sys/param.h>
40 #include <sys/types.h>
41
42 #include <sys/atomic.h>
43 #include <sys/kmem.h>
44 #include <sys/queue.h>
45 #include <sys/mbuf.h>
46 #include <sys/types.h>
47
48 #include <net/bpf.h>
49 #include <net/bpfjit.h>
50 #include <net/pfil.h>
51 #include <net/if.h>
52
53 #include "npf_impl.h"
54
/*
 * npf_ruleset: a set of rules.  Holds the flat ordered array used for
 * inspection as well as the bookkeeping lists.
 */
struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;		/* every rule in this set */
	LIST_HEAD(, npf_rule)	rs_dynamic;	/* dynamic (named) groups */
	LIST_HEAD(, npf_rule)	rs_gc;		/* unlinked, pending free */

	/* Unique ID counter (for dynamic rules added at run-time). */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules (the inspection order). */
	npf_rule_t *		rs_rules[];
};
75
/*
 * npf_rule: an individual rule, or a (possibly dynamic) group of rules.
 */
struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	/* Index (one past this rule's slot) to jump to on non-match. */
	u_int			r_skip_to;

	/* Code to process, if any (r_jcode is the JIT-compiled form). */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	u_int			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 * The unions overlap because a rule is either a group or a member.
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID, name and the optional key. */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary info (may be NULL). */
	LIST_ENTRY(npf_rule)	r_aentry;
	prop_data_t		r_info;
};
117
/* Serialise a single rule into a property-list dictionary. */
static int	npf_rule_export(const npf_rule_t *, prop_dictionary_t);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT		(0x01000000 & NPF_RULE_PRIVMASK)

/* True if the attributes describe a dynamic (named) rule group. */
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

/* True if the attributes describe a dynamic rule within a group. */
#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
130
131 npf_ruleset_t *
132 npf_ruleset_create(size_t slots)
133 {
134 size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
135 npf_ruleset_t *rlset;
136
137 rlset = kmem_zalloc(len, KM_SLEEP);
138 LIST_INIT(&rlset->rs_dynamic);
139 LIST_INIT(&rlset->rs_all);
140 LIST_INIT(&rlset->rs_gc);
141 rlset->rs_slots = slots;
142
143 return rlset;
144 }
145
146 static void
147 npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
148 {
149 if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
150 LIST_REMOVE(rl, r_dentry);
151 }
152 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
153 npf_rule_t *rg = rl->r_parent;
154 TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
155 }
156 LIST_REMOVE(rl, r_aentry);
157 }
158
159 void
160 npf_ruleset_destroy(npf_ruleset_t *rlset)
161 {
162 size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
163 npf_rule_t *rl;
164
165 while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
166 npf_ruleset_unlink(rlset, rl);
167 npf_rule_free(rl);
168 }
169 KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
170 KASSERT(LIST_EMPTY(&rlset->rs_gc));
171 kmem_free(rlset, len);
172 }
173
174 /*
175 * npf_ruleset_insert: insert the rule into the specified ruleset.
176 */
177 void
178 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
179 {
180 u_int n = rlset->rs_nitems;
181
182 KASSERT(n < rlset->rs_slots);
183
184 LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
185 if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
186 LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
187 } else {
188 KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
189 rl->r_attr &= ~NPF_RULE_DYNAMIC;
190 }
191
192 rlset->rs_rules[n] = rl;
193 rlset->rs_nitems++;
194
195 if (rl->r_skip_to < ++n) {
196 rl->r_skip_to = n;
197 }
198 }
199
200 static npf_rule_t *
201 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
202 {
203 npf_rule_t *rl;
204
205 KASSERT(npf_config_locked_p());
206
207 LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
208 KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
209 if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
210 break;
211 }
212 return rl;
213 }
214
/*
 * npf_ruleset_add: insert a dynamic rule into the named dynamic group,
 * positioned according to its priority.
 *
 * => Returns ESRCH if there is no such group, EINVAL if the rule is
 *    not marked as dynamic.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		/* Insert before any rule of equal or lower priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		/* Insert after any rule of equal priority (stable order). */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}
271
272 int
273 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
274 {
275 npf_rule_t *rg, *rl;
276
277 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
278 return ESRCH;
279 }
280 TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
281 KASSERT(rl->r_parent == rg);
282
283 /* Compare ID. On match, remove and return. */
284 if (rl->r_id == id) {
285 npf_ruleset_unlink(rlset, rl);
286 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
287 return 0;
288 }
289 }
290 return ENOENT;
291 }
292
293 int
294 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
295 const void *key, size_t len)
296 {
297 npf_rule_t *rg, *rl;
298
299 KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
300
301 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
302 return ESRCH;
303 }
304
305 /* Find the last in the list. */
306 TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
307 KASSERT(rl->r_parent == rg);
308
309 /* Compare the key. On match, remove and return. */
310 if (memcmp(rl->r_key, key, len) == 0) {
311 npf_ruleset_unlink(rlset, rl);
312 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
313 return 0;
314 }
315 }
316 return ENOENT;
317 }
318
319 prop_dictionary_t
320 npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
321 {
322 prop_dictionary_t rgdict;
323 prop_array_t rules;
324 npf_rule_t *rg, *rl;
325
326 KASSERT(npf_config_locked_p());
327
328 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
329 return NULL;
330 }
331 if ((rgdict = prop_dictionary_create()) == NULL) {
332 return NULL;
333 }
334 if ((rules = prop_array_create()) == NULL) {
335 prop_object_release(rgdict);
336 return NULL;
337 }
338
339 TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
340 prop_dictionary_t rldict;
341
342 rldict = prop_dictionary_create();
343 KASSERT(rl->r_parent == rg);
344
345 if (npf_rule_export(rl, rldict) ||
346 !prop_array_add(rules, rldict)) {
347 prop_object_release(rldict);
348 prop_object_release(rules);
349 return NULL;
350 }
351 }
352
353 if (!prop_dictionary_set(rgdict, "rules", rules)) {
354 prop_object_release(rgdict);
355 rgdict = NULL;
356 }
357 prop_object_release(rules);
358 return rgdict;
359 }
360
361 int
362 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
363 {
364 npf_rule_t *rg, *rl;
365
366 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
367 return ESRCH;
368 }
369 while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
370 KASSERT(rl->r_parent == rg);
371 npf_ruleset_unlink(rlset, rl);
372 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
373 }
374 return 0;
375 }
376
377 int
378 npf_ruleset_export(const npf_ruleset_t *rlset, prop_array_t rules)
379 {
380 const npf_rule_t *rl;
381 int error = 0;
382
383 KASSERT(npf_config_locked_p());
384
385 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
386 const npf_natpolicy_t *natp = rl->r_natp;
387 prop_dictionary_t rldict;
388
389 rldict = prop_dictionary_create();
390 if ((error = npf_rule_export(rl, rldict)) != 0) {
391 prop_object_release(rldict);
392 break;
393 }
394 if (natp && (error = npf_nat_policyexport(natp, rldict)) != 0) {
395 prop_object_release(rldict);
396 break;
397 }
398 if (!prop_array_add(rules, rldict)) {
399 prop_object_release(rldict);
400 return ENOMEM;
401 }
402 }
403 return error;
404 }
405
406 void
407 npf_ruleset_gc(npf_ruleset_t *rlset)
408 {
409 npf_rule_t *rl;
410
411 while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
412 LIST_REMOVE(rl, r_aentry);
413 npf_rule_free(rl);
414 }
415 }
416
417 /*
418 * npf_ruleset_cmpnat: find a matching NAT policy in the ruleset.
419 */
420 static inline npf_rule_t *
421 npf_ruleset_cmpnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
422 {
423 npf_rule_t *rl;
424
425 /* Find a matching NAT policy in the old ruleset. */
426 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
427 if (rl->r_natp && npf_nat_cmppolicy(rl->r_natp, mnp))
428 break;
429 }
430 return rl;
431 }
432
/*
 * npf_ruleset_reload: prepare the new ruleset by scanning the active
 * ruleset and 1) sharing the dynamic rules 2) sharing NAT policies.
 *
 * => The active (old) ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *newset, npf_ruleset_t *oldset)
{
	npf_rule_t *rg, *rl;
	uint64_t nid = 0;

	KASSERT(npf_config_locked_p());

	/*
	 * Scan the dynamic rules and share (migrate) if needed.
	 */
	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
		npf_rule_t *actrg;

		/* Look for a dynamic ruleset group with such name. */
		actrg = npf_ruleset_lookup(oldset, rg->r_name);
		if (actrg == NULL) {
			/* No active counterpart: the new group stays empty. */
			continue;
		}

		/*
		 * Copy the list-head structure.  This is necessary because
		 * the rules are still active and therefore accessible for
		 * inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &actrg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			/*
			 * We can safely migrate to the new all-rule list
			 * and re-set the parent rule, though.
			 */
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/*
	 * Scan all rules in the new ruleset and share NAT policies.
	 * Also, assign a unique ID for each policy here.
	 */
	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
		npf_natpolicy_t *np;
		npf_rule_t *actrl;

		/* Does the rule have a NAT policy associated? */
		if ((np = rl->r_natp) == NULL) {
			continue;
		}

		/* Does it match with any policy in the active ruleset? */
		if ((actrl = npf_ruleset_cmpnat(oldset, np)) == NULL) {
			/* Brand new policy: just give it an ID. */
			npf_nat_setid(np, ++nid);
			continue;
		}

		/*
		 * Inherit the matching NAT policy and check other ones
		 * in the new ruleset for sharing the portmap.
		 */
		rl->r_natp = actrl->r_natp;
		npf_ruleset_sharepm(newset, rl->r_natp);
		npf_nat_setid(rl->r_natp, ++nid);

		/*
		 * Finally, mark the active rule to not destroy its NAT
		 * policy later as we inherited it (but the rule must be
		 * kept active for now).  Destroy the new/unused policy.
		 */
		actrl->r_attr |= NPF_RULE_KEEPNAT;
		npf_nat_freepolicy(np);
	}

	/* Inherit the ID counter so dynamic rule IDs stay unique. */
	newset->rs_idcnt = oldset->rs_idcnt;
}
515
516 npf_rule_t *
517 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
518 {
519 npf_natpolicy_t *np;
520 npf_rule_t *rl;
521
522 /* Find a matching NAT policy in the old ruleset. */
523 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
524 /*
525 * NAT policy might not yet be set during the creation of
526 * the ruleset (in such case, rule is for our policy), or
527 * policies might be equal due to rule exchange on reload.
528 */
529 np = rl->r_natp;
530 if (np == NULL || np == mnp)
531 continue;
532 if (npf_nat_sharepm(np, mnp))
533 break;
534 }
535 return rl;
536 }
537
538 npf_natpolicy_t *
539 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
540 {
541 npf_rule_t *rl;
542
543 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
544 npf_natpolicy_t *np = rl->r_natp;
545 if (np && npf_nat_getid(np) == id) {
546 return np;
547 }
548 }
549 return NULL;
550 }
551
552 /*
553 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
554 * ALG from all NAT entries using it.
555 */
556 void
557 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
558 {
559 npf_rule_t *rl;
560 npf_natpolicy_t *np;
561
562 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
563 if ((np = rl->r_natp) != NULL) {
564 npf_nat_freealg(np, alg);
565 }
566 }
567 }
568
/*
 * npf_rule_alloc: allocate a rule and initialise it from the given
 * property-list dictionary.
 *
 * => Returns NULL if the interface name cannot be registered or the
 *    supplied key exceeds NPF_RULE_MAXKEYLEN.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;
	prop_data_t d;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional) */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attr", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "prio", &rl->r_priority);
	/* Userland must not be able to set private attributes. */
	rl->r_attr &= ~NPF_RULE_PRIVMASK;

	if (prop_dictionary_get_cstring_nocopy(rldict, "ifname", &rname)) {
		if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Get the skip-to index. No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	/*
	 * NOTE(review): assumes prop_data_data_nocopy() returns NULL for
	 * a missing or non-data "key" object — confirm against proplib(3).
	 */
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		/* r_key was zeroed by kmem_zalloc; short keys are padded. */
		memcpy(rl->r_key, key, len);
	}

	if ((d = prop_dictionary_get(rldict, "info")) != NULL) {
		/* Keep a private copy of the auxiliary info blob. */
		rl->r_info = prop_data_copy(d);
	}
	return rl;
}
626
627 static int
628 npf_rule_export(const npf_rule_t *rl, prop_dictionary_t rldict)
629 {
630 prop_data_t d;
631
632 prop_dictionary_set_uint32(rldict, "attr", rl->r_attr);
633 prop_dictionary_set_int32(rldict, "prio", rl->r_priority);
634 prop_dictionary_set_uint32(rldict, "skip-to", rl->r_skip_to);
635
636 prop_dictionary_set_int32(rldict, "code-type", rl->r_type);
637 if (rl->r_code) {
638 d = prop_data_create_data(rl->r_code, rl->r_clen);
639 prop_dictionary_set_and_rel(rldict, "code", d);
640 }
641
642 if (rl->r_ifid) {
643 const char *ifname = npf_ifmap_getname(rl->r_ifid);
644 prop_dictionary_set_cstring(rldict, "ifname", ifname);
645 }
646 prop_dictionary_set_uint64(rldict, "id", rl->r_id);
647
648 if (rl->r_name[0]) {
649 prop_dictionary_set_cstring(rldict, "name", rl->r_name);
650 }
651 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
652 d = prop_data_create_data(rl->r_key, NPF_RULE_MAXKEYLEN);
653 prop_dictionary_set_and_rel(rldict, "key", d);
654 }
655 prop_dictionary_set(rldict, "info", rl->r_info);
656 return 0;
657 }
658
659 /*
660 * npf_rule_setcode: assign filter code to the rule.
661 *
662 * => The code must be validated by the caller.
663 * => JIT compilation may be performed here.
664 */
665 void
666 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
667 {
668 KASSERT(type == NPF_CODE_BPF);
669
670 rl->r_type = type;
671 rl->r_code = code;
672 rl->r_clen = size;
673 rl->r_jcode = npf_bpf_compile(code, size);
674 }
675
676 /*
677 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
678 */
679 void
680 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
681 {
682 npf_rproc_acquire(rp);
683 rl->r_rproc = rp;
684 }
685
686 /*
687 * npf_rule_free: free the specified rule.
688 */
689 void
690 npf_rule_free(npf_rule_t *rl)
691 {
692 npf_natpolicy_t *np = rl->r_natp;
693 npf_rproc_t *rp = rl->r_rproc;
694
695 if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
696 /* Free NAT policy. */
697 npf_nat_freepolicy(np);
698 }
699 if (rp) {
700 /* Release rule procedure. */
701 npf_rproc_release(rp);
702 }
703 if (rl->r_code) {
704 /* Free byte-code. */
705 kmem_free(rl->r_code, rl->r_clen);
706 }
707 if (rl->r_jcode) {
708 /* Free JIT code. */
709 bpf_jit_freecode(rl->r_jcode);
710 }
711 if (rl->r_info) {
712 prop_object_release(rl->r_info);
713 }
714 kmem_free(rl, sizeof(npf_rule_t));
715 }
716
/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	/* Only dynamic rules carry a unique ID (see npf_ruleset_add). */
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}
729
730 npf_rproc_t *
731 npf_rule_getrproc(const npf_rule_t *rl)
732 {
733 npf_rproc_t *rp = rl->r_rproc;
734
735 if (rp) {
736 npf_rproc_acquire(rp);
737 }
738 return rp;
739 }
740
/* npf_rule_getnat: return the NAT policy of the rule (may be NULL). */
npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}
746
/*
 * npf_rule_setnat: assign NAT policy to the rule and insert into the
 * NAT policy list in the ruleset.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	/* A rule may carry at most one NAT policy. */
	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}
757
758 /*
759 * npf_rule_inspect: match the interface, direction and run the filter code.
760 * Returns true if rule matches and false otherwise.
761 */
762 static inline bool
763 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
764 const int di_mask, const u_int ifid)
765 {
766 /* Match the interface. */
767 if (rl->r_ifid && rl->r_ifid != ifid) {
768 return false;
769 }
770
771 /* Match the direction. */
772 if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
773 if ((rl->r_attr & di_mask) == 0)
774 return false;
775 }
776
777 /* Any code? */
778 if (!rl->r_code) {
779 KASSERT(rl->r_jcode == NULL);
780 return true;
781 }
782 KASSERT(rl->r_type == NPF_CODE_BPF);
783 return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
784 }
785
786 /*
787 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
788 * This is only for the dynamic rules. Subrules cannot have nested rules.
789 */
790 static npf_rule_t *
791 npf_rule_reinspect(const npf_rule_t *drl, bpf_args_t *bc_args,
792 const int di_mask, const u_int ifid)
793 {
794 npf_rule_t *final_rl = NULL, *rl;
795
796 KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));
797
798 TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
799 if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
800 continue;
801 }
802 if (rl->r_attr & NPF_RULE_FINAL) {
803 return rl;
804 }
805 final_rl = rl;
806 }
807 return final_rl;
808 }
809
/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 *
 * => Returns the matching rule, or NULL if nothing matched.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    const int di, const int layer)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	/* Exactly one of PFIL_IN/PFIL_OUT must be set. */
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	/*
	 * Prepare the external memory store and the arguments for
	 * the BPF programs to be executed.
	 */
	uint32_t bc_words[NPF_BPF_NWORDS];
	npf_bpf_prepare(npc, &bc_args, bc_words);

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		/* skip_to is always past the current slot (see insert). */
		KASSERT(n < skip_to);

		/* Group is a barrier: return a matching if found any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			/* No match: jump past this rule (or its group). */
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
884
885 /*
886 * npf_rule_conclude: return decision and the flags for conclusion.
887 *
888 * => Returns ENETUNREACH if "block" and 0 if "pass".
889 */
890 int
891 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
892 {
893 /* If not passing - drop the packet. */
894 *retfl = rl->r_attr;
895 return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
896 }
897