/*-
 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.46.4.3 2020/04/13 08:05:15 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>
#include <sys/types.h>

#include <net/bpf.h>
#include <net/bpfjit.h>
#include <net/pfil.h>
#include <net/if.h>
#endif

#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	u_int			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	union {
		/*
		 * Dynamic group: rule subset and a group list entry.
		 */
		struct {
			npf_rule_t *		r_subset;
			LIST_ENTRY(npf_rule)	r_dentry;
		};

		/*
		 * Dynamic rule: priority, parent group and next rule.
		 */
		struct {
			int			r_priority;
			npf_rule_t *		r_parent;
			npf_rule_t *		r_next;
		};
	};

	/* Rule ID, name and the optional key. */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary info. */
	LIST_ENTRY(npf_rule)	r_aentry;
	nvlist_t *		r_info;
	size_t			r_info_len;
};

#define	SKIPTO_ADJ_FLAG		(1U << 31)
#define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)
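
/*
 * Note (descriptive, derived from the code below): the SKIPTO_ADJ_FLAG bit
 * marks a skip-to value which was assigned automatically by
 * npf_ruleset_insert() rather than supplied with the rule; npf_rule_export()
 * reports such values as zero.  SKIPTO_MASK extracts the actual rule index.
 */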

static nvlist_t *	npf_rule_export(npf_t *, const npf_rule_t *);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)

#define	NPF_DYNAMIC_GROUP_P(attr) \
	(((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr) \
	(((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)

npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	LIST_INIT(&rlset->rs_gc);
	rlset->rs_slots = slots;

	return rlset;
}

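/*
 * npf_ruleset_destroy: destroy the ruleset, freeing all rules on the
 * all-rule list as well as anything left on the G/C list.
 */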
void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	if (rlset == NULL)
		return;

	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
			/*
			 * Note: r_subset may point to the rules which
			 * were inherited by a new ruleset.
			 */
			rl->r_subset = NULL;
			LIST_REMOVE(rl, r_dentry);
		}
		if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
			/* Not removing from r_subset, see above. */
			KASSERT(rl->r_parent != NULL);
		}
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));

	npf_ruleset_gc(rlset);
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	} else {
		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
		rl->r_attr &= ~NPF_RULE_DYNAMIC;
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;
	rl->r_id = ++rlset->rs_idcnt;

	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
	}
}
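
/*
 * Example (an illustrative sketch only; "rules" and "nrules" are
 * hypothetical caller-side variables): a ruleset is sized at creation
 * time, populated with npf_ruleset_insert() and torn down with
 * npf_ruleset_destroy(), which frees any rules still in it.
 *
 *	npf_ruleset_t *rs = npf_ruleset_create(nrules);
 *
 *	for (u_int i = 0; i < nrules; i++) {
 *		npf_ruleset_insert(rs, rules[i]);
 *	}
 *	...
 *	npf_ruleset_destroy(rs);
 */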
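
/*
 * npf_ruleset_lookup: find a dynamic rule group (named ruleset) by its name.
 */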
npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

/*
 * npf_ruleset_add: insert dynamic rule into the (active) ruleset.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it, *target;
	int priocmd;

	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}
	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	/*
	 * WARNING: once rg->r_subset or target->r_next of an *active*
	 * rule is set, our rule becomes globally visible and active.
	 * Therefore, a store barrier (membar_producer) must be issued
	 * first, so that rl->r_next is visible before the rule is
	 * published.
	 */
	switch (priocmd) {
	case NPF_PRI_LAST:
	default:
		target = NULL;
		it = rg->r_subset;
		while (it && it->r_priority <= rl->r_priority) {
			target = it;
			it = it->r_next;
		}
		if (target) {
			rl->r_next = target->r_next;
			membar_producer();
			target->r_next = rl;
			break;
		}
		/* FALLTHROUGH */

	case NPF_PRI_FIRST:
		rl->r_next = rg->r_subset;
		membar_producer();
		rg->r_subset = rl;
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

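/*
 * npf_ruleset_unlink: unlink the dynamic rule from its parent group's
 * subset list (given the previous rule, if any) and from the all-rule list.
 */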
static void
npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	if (prev) {
		prev->r_next = rl->r_next;
	} else {
		npf_rule_t *rg = rl->r_parent;
		rg->r_subset = rl->r_next;
	}
	LIST_REMOVE(rl, r_aentry);
}

/*
 * npf_ruleset_remove: remove the dynamic rule given the rule ID.
 */
int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
{
	npf_rule_t *rg, *prev = NULL;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));

		/* Compare ID.  On match, remove and return. */
		if (rl->r_id == id) {
			npf_ruleset_unlink(rl, prev);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
		prev = rl;
	}
	return ENOENT;
}

/*
 * npf_ruleset_remkey: remove the dynamic rule given the rule key.
 */
int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Compare the key and find the last in the list. */
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
		if (memcmp(rl->r_key, key, len) == 0) {
			lastprev = prev;
			rlast = rl;
		}
		prev = rl;
	}
	if (!rlast) {
		return ENOENT;
	}
	npf_ruleset_unlink(rlast, lastprev);
	LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
	return 0;
}
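
/*
 * Example (illustrative only; the group name is hypothetical): a dynamic
 * rule is added into a named group and later removed either by its ID or,
 * via npf_ruleset_remkey(), by the key set at allocation time.
 *
 *	error = npf_ruleset_add(rlset, "example-group", rl);
 *	...
 *	error = npf_ruleset_remove(rlset, "example-group",
 *	    npf_rule_getid(rl));
 */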

/*
 * npf_ruleset_list: serialise and return the dynamic rules.
 */
nvlist_t *
npf_ruleset_list(npf_t *npf, npf_ruleset_t *rlset, const char *rname)
{
	nvlist_t *rgroup;
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p(npf));

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rgroup = nvlist_create(0)) == NULL) {
		return NULL;
	}
	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
		nvlist_t *rule;

		KASSERT(rl->r_parent == rg);
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));

		rule = npf_rule_export(npf, rl);
		if (!rule) {
			nvlist_destroy(rgroup);
			return NULL;
		}
		nvlist_append_nvlist_array(rgroup, "rules", rule);
		nvlist_destroy(rule);
	}
	return rgroup;
}

/*
 * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
 * them into the G/C list.
 */
int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	rl = atomic_swap_ptr(&rg->r_subset, NULL);
	membar_producer();

	while (rl) {
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
		KASSERT(rl->r_parent == rg);

		LIST_REMOVE(rl, r_aentry);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
		rl = rl->r_next;
	}
	rlset->rs_idcnt = 0;
	return 0;
}
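
/*
 * Note (an interpretation of the G/C scheme above): the flushed rules are
 * not destroyed here; they are parked on the G/C list and may still be
 * reached by concurrent readers of the old subset until the caller runs
 * npf_ruleset_gc(), presumably once it is safe to reclaim them.
 */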

/*
 * npf_ruleset_gc: destroy the rules in G/C list.
 */
void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

/*
 * npf_ruleset_export: serialise and return the static rules.
 */
int
npf_ruleset_export(npf_t *npf, const npf_ruleset_t *rlset,
    const char *key, nvlist_t *npf_dict)
{
	const unsigned nitems = rlset->rs_nitems;
	unsigned n = 0;
	int error = 0;

	KASSERT(npf_config_locked_p(npf));

	while (n < nitems) {
		const npf_rule_t *rl = rlset->rs_rules[n];
		const npf_natpolicy_t *natp = rl->r_natp;
		nvlist_t *rule;

		rule = npf_rule_export(npf, rl);
		if (!rule) {
			error = ENOMEM;
			break;
		}
		if (natp && (error = npf_nat_policyexport(natp, rule)) != 0) {
			nvlist_destroy(rule);
			break;
		}
		nvlist_append_nvlist_array(npf_dict, key, rule);
		nvlist_destroy(rule);
		n++;
	}
	return error;
}

/*
 * npf_ruleset_reload: prepare the new ruleset by scanning the active
 * ruleset and: 1) sharing the dynamic rules 2) sharing NAT policies.
 *
 * => The active (old) ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_t *npf, npf_ruleset_t *newset,
    npf_ruleset_t *oldset, bool load)
{
	npf_rule_t *rg, *rl;
	uint64_t nid = 0;

	KASSERT(npf_config_locked_p(npf));

	/*
	 * Scan the dynamic rules and share (migrate) if needed.
	 */
	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
		npf_rule_t *active_rgroup;

		/* Look for a dynamic ruleset group with such name. */
		active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
		if (active_rgroup == NULL) {
			continue;
		}

		/*
		 * ATOMICITY: Copy the head pointer of the linked-list,
		 * but do not remove the rules from the active r_subset.
		 * This is necessary because the rules are still active
		 * and therefore are accessible for inspection via the
		 * old ruleset.
		 */
		rg->r_subset = active_rgroup->r_subset;

		/*
		 * We can safely migrate to the new all-rule list and
		 * reset the parent rule, though.
		 */
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);

			KASSERT(rl->r_parent == active_rgroup);
			rl->r_parent = rg;
		}
	}

	/*
	 * If performing the load of connections then NAT policies may
	 * already have translated connections associated with them and
	 * we should not share or inherit anything.
	 */
	if (load)
		return;

	/*
	 * Scan all rules in the new ruleset and inherit the active NAT
	 * policies if they are the same.  Also, assign a unique ID for
	 * each policy here.
	 */
	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
		npf_natpolicy_t *np;
		npf_rule_t *actrl;

		/* Does the rule have a NAT policy associated? */
		if ((np = rl->r_natp) == NULL) {
			continue;
		}

		/* Does it match with any policy in the active ruleset? */
		LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
			if (!actrl->r_natp)
				continue;
			if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
				continue;
			if (npf_nat_cmppolicy(actrl->r_natp, np))
				break;
		}
		if (!actrl) {
			/* No: just set the ID and continue. */
			npf_nat_setid(np, ++nid);
			continue;
		}

		/* Yes: inherit the matching NAT policy. */
		rl->r_natp = actrl->r_natp;
		npf_nat_setid(rl->r_natp, ++nid);

		/*
		 * Finally, mark the active rule to not destroy its NAT
		 * policy later as we inherited it (but the rule must be
		 * kept active for now).  Destroy the new/unused policy.
		 */
		actrl->r_attr |= NPF_RULE_KEEPNAT;
		npf_nat_freepolicy(np);
	}

	/* Inherit the ID counter. */
	newset->rs_idcnt = oldset->rs_idcnt;
}

/*
 * npf_ruleset_findnat: find a NAT policy in the ruleset by a given ID.
 */
npf_natpolicy_t *
npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
{
	npf_rule_t *rl;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		npf_natpolicy_t *np = rl->r_natp;
		if (np && npf_nat_getid(np) == id) {
			return np;
		}
	}
	return NULL;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_rule_alloc: allocate a rule and initialise it.
 */
npf_rule_t *
npf_rule_alloc(npf_t *npf, const nvlist_t *rule)
{
	npf_rule_t *rl;
	const char *rname;
	const void *key, *info;
	size_t len;

	/* Allocate a rule structure and keep the information. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	info = dnvlist_get_binary(rule, "info", &rl->r_info_len, NULL, 0);
	if (info) {
		rl->r_info = kmem_alloc(rl->r_info_len, KM_SLEEP);
		memcpy(rl->r_info, info, rl->r_info_len);
	}
	rl->r_natp = NULL;

	/* Name (optional) */
	if ((rname = dnvlist_get_string(rule, "name", NULL)) != NULL) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	rl->r_attr = dnvlist_get_number(rule, "attr", 0);
	rl->r_attr &= ~NPF_RULE_PRIVMASK;

	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		/* Priority of the dynamic rule. */
		rl->r_priority = (int)dnvlist_get_number(rule, "prio", 0);
	} else {
		/* The skip-to index.  No need to validate it. */
		rl->r_skip_to = dnvlist_get_number(rule, "skip-to", 0);
	}

	/* Interface name; register and get the npf-if-id. */
	if ((rname = dnvlist_get_string(rule, "ifname", NULL)) != NULL) {
		if ((rl->r_ifid = npf_ifmap_register(npf, rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Key (optional). */
	if ((key = dnvlist_get_binary(rule, "key", &len, NULL, 0)) != NULL) {
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}
	return rl;
}
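
/*
 * Example (illustrative sketch; the attribute combination shown is only an
 * assumption about a typical static rule): npf_rule_alloc() consumes an
 * nvlist carrying the optional "name", "attr", "prio"/"skip-to", "ifname",
 * "key" and "info" entries, for instance:
 *
 *	nvlist_t *rule = nvlist_create(0);
 *	npf_rule_t *rl;
 *
 *	nvlist_add_string(rule, "name", "example-rule");
 *	nvlist_add_number(rule, "attr", NPF_RULE_PASS | NPF_RULE_IN);
 *	nvlist_add_number(rule, "skip-to", 0);
 *	rl = npf_rule_alloc(npf, rule);
 *	nvlist_destroy(rule);
 */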
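
/*
 * npf_rule_export: serialise the given rule into a newly created nvlist.
 */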
static nvlist_t *
npf_rule_export(npf_t *npf, const npf_rule_t *rl)
{
	nvlist_t *rule = nvlist_create(0);
	unsigned skip_to = 0;
	npf_rproc_t *rp;

	nvlist_add_number(rule, "attr", rl->r_attr);
	nvlist_add_number(rule, "prio", rl->r_priority);
	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
		skip_to = rl->r_skip_to & SKIPTO_MASK;
	}
	nvlist_add_number(rule, "skip-to", skip_to);
	nvlist_add_number(rule, "code-type", rl->r_type);
	if (rl->r_code) {
		nvlist_add_binary(rule, "code", rl->r_code, rl->r_clen);
	}
	if (rl->r_ifid) {
		char ifname[IFNAMSIZ];
		npf_ifmap_copyname(npf, rl->r_ifid, ifname, sizeof(ifname));
		nvlist_add_string(rule, "ifname", ifname);
	}
	nvlist_add_number(rule, "id", rl->r_id);

	if (rl->r_name[0]) {
		nvlist_add_string(rule, "name", rl->r_name);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		nvlist_add_binary(rule, "key", rl->r_key, NPF_RULE_MAXKEYLEN);
	}
	if (rl->r_info) {
		nvlist_add_binary(rule, "info", rl->r_info, rl->r_info_len);
	}
	if ((rp = npf_rule_getrproc(rl)) != NULL) {
		const char *rname = npf_rproc_getname(rp);
		nvlist_add_string(rule, "rproc", rname);
		npf_rproc_release(rp);
	}
	return rule;
}

/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	KASSERT(type == NPF_CODE_BPF);

	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
	rl->r_jcode = npf_bpf_compile(code, size);
}

/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		bpf_jit_freecode(rl->r_jcode);
	}
	if (rl->r_info) {
		kmem_free(rl->r_info, rl->r_info_len);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}

npf_rproc_t *
npf_rule_getrproc(const npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign the NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if rule matches and false otherwise.
 */
static inline bool
npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != ifid) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Any code? */
	if (!rl->r_code) {
		KASSERT(rl->r_jcode == NULL);
		return true;
	}
	KASSERT(rl->r_type == NPF_CODE_BPF);
	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
 * This is only for the dynamic rules.  Subrules cannot have nested rules.
 */
static inline npf_rule_t *
npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));

	for (rl = rg->r_subset; rl; rl = rl->r_next) {
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    const int di, const int layer)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	/*
	 * Prepare the external memory store and the arguments for
	 * the BPF programs to be executed.  Reset mbuf before taking
	 * any pointers for the BPF.
	 */
	uint32_t bc_words[NPF_BPF_NWORDS];

	nbuf_reset(nbuf);
	npf_bpf_prepare(npc, &bc_args, bc_words);

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(n < skip_to);

		/* Group is a barrier: return the matching rule, if any was found. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule group, re-inspect its
			 * subrules.  Any match found there is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves never match; only a regular
			 * rule can become the matching candidate.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, npf_match_info_t *mi)
{
	/* If not passing - drop the packet. */
	mi->mi_retfl = rl->r_attr;
	mi->mi_rid = rl->r_id;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}
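
/*
 * Example (illustrative only; "npc", "rlset", "di" and "layer" stand for
 * whatever the packet-handler caller has in hand): the typical sequence is
 * to inspect the packet against the ruleset and, if a rule matched, derive
 * the decision from it.  A zero result means "pass"; ENETUNREACH means
 * "block".
 *
 *	npf_match_info_t mi;
 *	npf_rule_t *rl;
 *	int error;
 *
 *	rl = npf_ruleset_inspect(npc, rlset, di, layer);
 *	if (rl != NULL) {
 *		error = npf_rule_conclude(rl, &mi);
 *	}
 */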

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_ruleset_dump(npf_t *npf, const char *name)
{
	npf_ruleset_t *rlset = npf_config_ruleset(npf);
	npf_rule_t *rg, *rl;

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		printf("ruleset '%s':\n", rg->r_name);
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			printf("\tid %"PRIu64", key: ", rl->r_id);
			for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
				printf("%x", rl->r_key[i]);
			printf("\n");
		}
	}
}

#endif