/*-
 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.46.4.2 2020/04/08 14:08:57 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>
#include <sys/types.h>

#include <net/bpf.h>
#include <net/bpfjit.h>
#include <net/pfil.h>
#include <net/if.h>
#endif

#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	u_int			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	union {
		/*
		 * Dynamic group: rule subset and a group list entry.
		 */
		struct {
			npf_rule_t *		r_subset;
			LIST_ENTRY(npf_rule)	r_dentry;
		};

		/*
		 * Dynamic rule: priority, parent group and next rule.
		 */
		struct {
			int			r_priority;
			npf_rule_t *		r_parent;
			npf_rule_t *		r_next;
		};
	};

	/* Rule ID, name and the optional key. */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary info. */
	LIST_ENTRY(npf_rule)	r_aentry;
	nvlist_t *		r_info;
	size_t			r_info_len;
};

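/*
 * Skip-to values carry SKIPTO_ADJ_FLAG when they were adjusted
 * automatically at insertion time (see npf_ruleset_insert); such values
 * are masked out and not exported back to userland (see npf_rule_export).
 */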
#define	SKIPTO_ADJ_FLAG		(1U << 31)
#define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)

static nvlist_t *	npf_rule_export(npf_t *, const npf_rule_t *);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)

#define	NPF_DYNAMIC_GROUP_P(attr) \
	(((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr) \
	(((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)

npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	LIST_INIT(&rlset->rs_gc);
	rlset->rs_slots = slots;

	return rlset;
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	if (rlset == NULL)
		return;

	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
			/*
			 * Note: r_subset may point to the rules which
			 * were inherited by a new ruleset.
			 */
			rl->r_subset = NULL;
			LIST_REMOVE(rl, r_dentry);
		}
		if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
			/* Not removing from r_subset, see above. */
			KASSERT(rl->r_parent != NULL);
		}
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));

	npf_ruleset_gc(rlset);
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	} else {
		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
		rl->r_attr &= ~NPF_RULE_DYNAMIC;
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;
	rl->r_id = ++rlset->rs_idcnt;

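	/*
	 * If the skip-to index was not set, or does not point past this
	 * rule, default it to the next rule and mark it as auto-adjusted.
	 */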
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
	}
}

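/*
 * npf_ruleset_lookup: find a dynamic rule group (i.e. a named ruleset)
 * by its name; returns NULL if there is no such group.
 */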
npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

/*
 * npf_ruleset_add: insert dynamic rule into the (active) ruleset.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it, *target;
	int priocmd;

	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}
	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	/*
	 * WARNING: once rg->r_subset or target->r_next of an *active*
	 * rule is set, our rule becomes globally visible and active.
	 * A producer (store) barrier must therefore be issued first, so
	 * that the store to rl->r_next is visible before the rule is
	 * published.
	 */
	switch (priocmd) {
	case NPF_PRI_LAST:
	default:
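		/*
		 * Insert after the last rule whose priority is not greater
		 * than ours (priority 1 is the highest); rules of equal
		 * priority therefore keep their insertion order.
		 */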
		target = NULL;
		it = rg->r_subset;
		while (it && it->r_priority <= rl->r_priority) {
			target = it;
			it = it->r_next;
		}
		if (target) {
			rl->r_next = target->r_next;
			membar_producer();
			target->r_next = rl;
			break;
		}
		/* FALLTHROUGH */

	case NPF_PRI_FIRST:
		rl->r_next = rg->r_subset;
		membar_producer();
		rg->r_subset = rl;
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

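/*
 * npf_ruleset_unlink: unlink the dynamic rule from its parent group's
 * subset and remove it from the all-rule list; the caller is expected
 * to move the rule to the G/C list.
 */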
static void
npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	if (prev) {
		prev->r_next = rl->r_next;
	} else {
		npf_rule_t *rg = rl->r_parent;
		rg->r_subset = rl->r_next;
	}
	LIST_REMOVE(rl, r_aentry);
}

/*
 * npf_ruleset_remove: remove the dynamic rule given the rule ID.
 */
int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
{
	npf_rule_t *rg, *prev = NULL;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));

		/* Compare ID. On match, remove and return. */
		if (rl->r_id == id) {
			npf_ruleset_unlink(rl, prev);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
		prev = rl;
	}
	return ENOENT;
}

/*
 * npf_ruleset_remkey: remove the dynamic rule given the rule key.
 */
int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Compare the key and find the last in the list. */
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
		if (memcmp(rl->r_key, key, len) == 0) {
			lastprev = prev;
			rlast = rl;
		}
		prev = rl;
	}
	if (!rlast) {
		return ENOENT;
	}
	npf_ruleset_unlink(rlast, lastprev);
	LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
	return 0;
}

/*
 * npf_ruleset_list: serialise and return the dynamic rules.
 */
nvlist_t *
npf_ruleset_list(npf_t *npf, npf_ruleset_t *rlset, const char *rname)
{
	nvlist_t *rgroup;
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p(npf));

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rgroup = nvlist_create(0)) == NULL) {
		return NULL;
	}
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		nvlist_t *rule;

		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));

		rule = npf_rule_export(npf, rl);
		if (!rule) {
			nvlist_destroy(rgroup);
			return NULL;
		}
		nvlist_append_nvlist_array(rgroup, "rules", rule);
		nvlist_destroy(rule);
	}
	return rgroup;
}

/*
 * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
 * them into the G/C list.
 */
int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	rl = atomic_swap_ptr(&rg->r_subset, NULL);
	membar_producer();

	while (rl) {
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
		KASSERT(rl->r_parent == rg);

		LIST_REMOVE(rl, r_aentry);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
		rl = rl->r_next;
	}
	rlset->rs_idcnt = 0;
	return 0;
}

/*
 * npf_ruleset_gc: destroy the rules in G/C list.
 */
void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

/*
 * npf_ruleset_export: serialise and return the static rules.
 */
int
npf_ruleset_export(npf_t *npf, const npf_ruleset_t *rlset,
    const char *key, nvlist_t *npf_dict)
{
	const unsigned nitems = rlset->rs_nitems;
	unsigned n = 0;
	int error = 0;

	KASSERT(npf_config_locked_p(npf));

	while (n < nitems) {
		const npf_rule_t *rl = rlset->rs_rules[n];
		const npf_natpolicy_t *natp = rl->r_natp;
		nvlist_t *rule;

		rule = npf_rule_export(npf, rl);
		if (!rule) {
			error = ENOMEM;
			break;
		}
		if (natp && (error = npf_nat_policyexport(natp, rule)) != 0) {
			nvlist_destroy(rule);
			break;
		}
		nvlist_append_nvlist_array(npf_dict, key, rule);
		nvlist_destroy(rule);
		n++;
	}
	return error;
}

/*
 * npf_ruleset_reload: prepare the new ruleset by scanning the active
 * ruleset and: 1) sharing the dynamic rules 2) sharing NAT policies.
 *
 * => The active (old) ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_t *npf, npf_ruleset_t *newset,
    npf_ruleset_t *oldset, bool load)
{
	npf_rule_t *rg, *rl;
	uint64_t nid = 0;

	KASSERT(npf_config_locked_p(npf));

	/*
	 * Scan the dynamic rules and share (migrate) if needed.
	 */
	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
		npf_rule_t *active_rgroup;

		/* Look for a dynamic ruleset group with such name. */
		active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
		if (active_rgroup == NULL) {
			continue;
		}

		/*
		 * ATOMICITY: Copy the head pointer of the linked-list,
		 * but do not remove the rules from the active r_subset.
		 * This is necessary because the rules are still active
		 * and therefore are accessible for inspection via the
		 * old ruleset.
		 */
		rg->r_subset = active_rgroup->r_subset;

		/*
		 * We can safely migrate to the new all-rule list and
		 * reset the parent rule, though.
		 */
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);

			KASSERT(rl->r_parent == active_rgroup);
			rl->r_parent = rg;
		}
	}

	/*
	 * If performing the load of connections then NAT policies may
	 * already have translated connections associated with them and
	 * we should not share or inherit anything.
	 */
	if (load)
		return;

	/*
	 * Scan all rules in the new ruleset and share NAT policies.
	 * Also, assign a unique ID for each policy here.
	 */
	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
		npf_natpolicy_t *np;
		npf_rule_t *actrl;

		/* Does the rule have a NAT policy associated? */
		if ((np = rl->r_natp) == NULL) {
			continue;
		}

		/*
		 * First, try to share the active port map. If this
		 * policy will be unused, npf_nat_freepolicy() will
		 * drop the reference.
		 */
		npf_ruleset_sharepm(oldset, np);

		/* Does it match with any policy in the active ruleset? */
		LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
			if (!actrl->r_natp)
				continue;
			if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
				continue;
			if (npf_nat_cmppolicy(actrl->r_natp, np))
				break;
		}
		if (!actrl) {
			/* No: just set the ID and continue. */
			npf_nat_setid(np, ++nid);
			continue;
		}

		/* Yes: inherit the matching NAT policy. */
		rl->r_natp = actrl->r_natp;
		npf_nat_setid(rl->r_natp, ++nid);

		/*
		 * Finally, mark the active rule to not destroy its NAT
		 * policy later as we inherited it (but the rule must be
		 * kept active for now). Destroy the new/unused policy.
		 */
		actrl->r_attr |= NPF_RULE_KEEPNAT;
		npf_nat_freepolicy(np);
	}

	/* Inherit the ID counter. */
	newset->rs_idcnt = oldset->rs_idcnt;
}

/*
 * npf_ruleset_sharepm: attempt to share the active NAT portmap.
 */
npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/*
	 * Scan the NAT policies in the ruleset and match with the
	 * given policy based on the translation IP address. If they
	 * match - adjust the given NAT policy to use the active NAT
	 * portmap. In such case the reference on the old portmap is
	 * dropped and acquired on the active one.
	 */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

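/*
 * npf_ruleset_findnat: find a NAT policy in the ruleset by the policy ID.
 */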
npf_natpolicy_t *
npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
{
	npf_rule_t *rl;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		npf_natpolicy_t *np = rl->r_natp;
		if (np && npf_nat_getid(np) == id) {
			return np;
		}
	}
	return NULL;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_rule_alloc: allocate a rule and initialise it.
 */
npf_rule_t *
npf_rule_alloc(npf_t *npf, const nvlist_t *rule)
{
	npf_rule_t *rl;
	const char *rname;
	const void *key, *info;
	size_t len;

	/* Allocate a rule structure and keep the information. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	info = dnvlist_get_binary(rule, "info", &rl->r_info_len, NULL, 0);
	if (info) {
		rl->r_info = kmem_alloc(rl->r_info_len, KM_SLEEP);
		memcpy(rl->r_info, info, rl->r_info_len);
	}
	rl->r_natp = NULL;

	/* Name (optional) */
	if ((rname = dnvlist_get_string(rule, "name", NULL)) != NULL) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	rl->r_attr = dnvlist_get_number(rule, "attr", 0);
	rl->r_attr &= ~NPF_RULE_PRIVMASK;

	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		/* Priority of the dynamic rule. */
		rl->r_priority = dnvlist_get_number(rule, "prio", 0);
	} else {
		/* The skip-to index. No need to validate it. */
		rl->r_skip_to = dnvlist_get_number(rule, "skip-to", 0);
	}

	/* Interface name; register and get the npf-if-id. */
	if ((rname = dnvlist_get_string(rule, "ifname", NULL)) != NULL) {
		if ((rl->r_ifid = npf_ifmap_register(npf, rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Key (optional). */
	if ((key = dnvlist_get_binary(rule, "key", &len, NULL, 0)) != NULL) {
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}
	return rl;
}

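/*
 * npf_rule_export: serialise a single rule into an nvlist. The skip-to
 * value is exported only if it was set explicitly; auto-adjusted values
 * (carrying SKIPTO_ADJ_FLAG) are exported as zero.
 */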
static nvlist_t *
npf_rule_export(npf_t *npf, const npf_rule_t *rl)
{
	nvlist_t *rule = nvlist_create(0);
	unsigned skip_to = 0;
	npf_rproc_t *rp;

	nvlist_add_number(rule, "attr", rl->r_attr);
	nvlist_add_number(rule, "prio", rl->r_priority);
	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
		skip_to = rl->r_skip_to & SKIPTO_MASK;
	}
	nvlist_add_number(rule, "skip-to", skip_to);
	nvlist_add_number(rule, "code-type", rl->r_type);
	if (rl->r_code) {
		nvlist_add_binary(rule, "code", rl->r_code, rl->r_clen);
	}
	if (rl->r_ifid) {
		const char *ifname = npf_ifmap_getname(npf, rl->r_ifid);
		nvlist_add_string(rule, "ifname", ifname);
	}
	nvlist_add_number(rule, "id", rl->r_id);

	if (rl->r_name[0]) {
		nvlist_add_string(rule, "name", rl->r_name);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		nvlist_add_binary(rule, "key", rl->r_key, NPF_RULE_MAXKEYLEN);
	}
	if (rl->r_info) {
		nvlist_add_binary(rule, "info", rl->r_info, rl->r_info_len);
	}
	if ((rp = npf_rule_getrproc(rl)) != NULL) {
		const char *rname = npf_rproc_getname(rp);
		nvlist_add_string(rule, "rproc", rname);
		npf_rproc_release(rp);
	}
	return rule;
}

/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	KASSERT(type == NPF_CODE_BPF);

	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
	rl->r_jcode = npf_bpf_compile(code, size);
}

/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		bpf_jit_freecode(rl->r_jcode);
	}
	if (rl->r_info) {
		kmem_free(rl->r_info, rl->r_info_len);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}

npf_rproc_t *
npf_rule_getrproc(const npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign the NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if rule matches and false otherwise.
 */
static inline bool
npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != ifid) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Any code? */
	if (!rl->r_code) {
		KASSERT(rl->r_jcode == NULL);
		return true;
	}
	KASSERT(rl->r_type == NPF_CODE_BPF);
	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic group by iterating over its
 * rule subset. This is only for dynamic groups; subrules cannot have
 * nested rules.
 */
static inline npf_rule_t *
npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));

	for (rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain). If sub-ruleset is found, inspect it.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    const int di, const int layer)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	/*
	 * Prepare the external memory store and the arguments for
	 * the BPF programs to be executed. Reset mbuf before taking
	 * any pointers for the BPF.
	 */
	uint32_t bc_words[NPF_BPF_NWORDS];

	nbuf_reset(nbuf);
	npf_bpf_prepare(npc, &bc_args, bc_words);

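	/*
	 * Scan the ordered array of rules. A group acts as a barrier:
	 * reaching one ends the scan if a match has already been found,
	 * and the skip-to index allows the scan to jump over the subrules
	 * of a non-matching group.
	 */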
	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(n < skip_to);

		/* Group is a barrier: return the matching rule, if any was found. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic group, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, npf_match_info_t *mi)
{
	/* If not passing - drop the packet. */
	mi->mi_retfl = rl->r_attr;
	mi->mi_rid = rl->r_id;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}


#if defined(DDB) || defined(_NPF_TESTING)

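/*
 * npf_ruleset_dump: print all dynamic rule groups in the active ruleset,
 * with the ID and key of each rule in them (DDB/testing only).
 */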
void
npf_ruleset_dump(npf_t *npf, const char *name)
{
	npf_ruleset_t *rlset = npf_config_ruleset(npf);
	npf_rule_t *rg, *rl;

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		printf("ruleset '%s':\n", rg->r_name);
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			printf("\tid %"PRIu64", key: ", rl->r_id);
			for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
				printf("%x", rl->r_key[i]);
			printf("\n");
		}
	}
}

#endif