npf_ruleset.c revision 1.49 1 /*-
2 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This material is based upon work partially supported by The
6 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * NPF ruleset module.
32 */
33
34 #ifdef _KERNEL
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.49 2019/09/29 17:00:29 rmind Exp $");
37
38 #include <sys/param.h>
39 #include <sys/types.h>
40
41 #include <sys/atomic.h>
42 #include <sys/kmem.h>
43 #include <sys/queue.h>
44 #include <sys/mbuf.h>
45 #include <sys/types.h>
46
47 #include <net/bpf.h>
48 #include <net/bpfjit.h>
49 #include <net/pfil.h>
50 #include <net/if.h>
51 #endif
52
53 #include "npf_impl.h"
54
struct npf_ruleset {
	/*
	 * Rule lists:
	 * - rs_all: every rule owned by this ruleset (static and dynamic).
	 * - rs_dynamic: the dynamic (i.e. named) rule groups only.
	 * - rs_gc: rules unlinked but not yet destroyed (G/C list),
	 *   kept here for convenience until npf_ruleset_gc() runs.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter, incremented for each inserted rule. */
	uint64_t		rs_idcnt;

	/* Number of array slots (capacity) and active rules (in use). */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules (flexible array member, rs_slots long). */
	npf_rule_t *		rs_rules[];
};
75
struct npf_rule {
	/* Attributes, interface ID and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Filter code to process, if any (BPF byte-code and JIT form). */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	u_int			r_clen;

	/* NAT policy (optional) and rule procedure (optional). */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/*
	 * Anonymous union: a rule is either a dynamic *group* or a
	 * dynamic *rule* — never both — so the two member sets overlay.
	 */
	union {
		/*
		 * Dynamic group: head of the rule subset and the entry
		 * on the ruleset's rs_dynamic list.
		 */
		struct {
			npf_rule_t *		r_subset;
			LIST_ENTRY(npf_rule)	r_dentry;
		};

		/*
		 * Dynamic rule: priority, parent group and the next
		 * rule in the singly-linked subset chain.
		 */
		struct {
			int			r_priority;
			npf_rule_t *		r_parent;
			npf_rule_t *		r_next;
		};
	};

	/* Rule ID, name and the optional key. */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary (opaque) info blob. */
	LIST_ENTRY(npf_rule)	r_aentry;
	nvlist_t *		r_info;
	size_t			r_info_len;
};
121
/*
 * The top bit of r_skip_to marks an auto-adjusted ("next rule") skip
 * value assigned by npf_ruleset_insert(); the low 31 bits carry the
 * actual slot index.
 */
#define	SKIPTO_ADJ_FLAG		(1U << 31)
#define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)

static nvlist_t *	npf_rule_export(npf_t *, const npf_rule_t *);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)

/* True if the attributes describe a dynamic *group*. */
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

/* True if the attributes describe a dynamic *rule* (member of a group). */
#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
137
138 npf_ruleset_t *
139 npf_ruleset_create(size_t slots)
140 {
141 size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
142 npf_ruleset_t *rlset;
143
144 rlset = kmem_zalloc(len, KM_SLEEP);
145 LIST_INIT(&rlset->rs_dynamic);
146 LIST_INIT(&rlset->rs_all);
147 LIST_INIT(&rlset->rs_gc);
148 rlset->rs_slots = slots;
149
150 return rlset;
151 }
152
/*
 * npf_ruleset_destroy: free all rules owned by the ruleset and the
 * ruleset itself.  Assumes the ruleset is no longer reachable.
 */
void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
			/*
			 * Note: r_subset may point to the rules which
			 * were inherited by a new ruleset.  Clear the
			 * head but do not touch the chained rules.
			 */
			rl->r_subset = NULL;
			LIST_REMOVE(rl, r_dentry);
		}
		if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
			/* Not removing from r_subset, see above. */
			KASSERT(rl->r_parent != NULL);
		}
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));

	/* Drain any rules still pending on the G/C list. */
	npf_ruleset_gc(rlset);
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}
181
182 /*
183 * npf_ruleset_insert: insert the rule into the specified ruleset.
184 */
185 void
186 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
187 {
188 u_int n = rlset->rs_nitems;
189
190 KASSERT(n < rlset->rs_slots);
191
192 LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
193 if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
194 LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
195 } else {
196 KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
197 rl->r_attr &= ~NPF_RULE_DYNAMIC;
198 }
199
200 rlset->rs_rules[n] = rl;
201 rlset->rs_nitems++;
202 rl->r_id = ++rlset->rs_idcnt;
203
204 if (rl->r_skip_to < ++n) {
205 rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
206 }
207 }
208
209 npf_rule_t *
210 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
211 {
212 npf_rule_t *rl;
213
214 LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
215 KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
216 if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
217 break;
218 }
219 return rl;
220 }
221
222 /*
223 * npf_ruleset_add: insert dynamic rule into the (active) ruleset.
224 */
225 int
226 npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
227 {
228 npf_rule_t *rg, *it, *target;
229 int priocmd;
230
231 if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
232 return EINVAL;
233 }
234 rg = npf_ruleset_lookup(rlset, rname);
235 if (rg == NULL) {
236 return ESRCH;
237 }
238
239 /* Dynamic rule - assign a unique ID and save the parent. */
240 rl->r_id = ++rlset->rs_idcnt;
241 rl->r_parent = rg;
242
243 /*
244 * Rule priority: (highest) 1, 2 ... n (lowest).
245 * Negative priority indicates an operation and is reset to zero.
246 */
247 if ((priocmd = rl->r_priority) < 0) {
248 rl->r_priority = 0;
249 }
250
251 /*
252 * WARNING: once rg->subset or target->r_next of an *active*
253 * rule is set, then our rule becomes globally visible and active.
254 * Must issue a load fence to ensure rl->r_next visibility first.
255 */
256 switch (priocmd) {
257 case NPF_PRI_LAST:
258 default:
259 target = NULL;
260 it = rg->r_subset;
261 while (it && it->r_priority <= rl->r_priority) {
262 target = it;
263 it = it->r_next;
264 }
265 if (target) {
266 rl->r_next = target->r_next;
267 membar_producer();
268 target->r_next = rl;
269 break;
270 }
271 /* FALLTHROUGH */
272
273 case NPF_PRI_FIRST:
274 rl->r_next = rg->r_subset;
275 membar_producer();
276 rg->r_subset = rl;
277 break;
278 }
279
280 /* Finally, add into the all-list. */
281 LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
282 return 0;
283 }
284
/*
 * npf_ruleset_unlink: unlink a dynamic rule from its parent group's
 * subset chain and from the all-list.  The caller supplies the rule's
 * predecessor in the chain (NULL if the rule is the head).
 *
 * Note: the rule itself is not freed; the caller moves it to the
 * G/C list so concurrent readers may still traverse it safely.
 */
static void
npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	if (prev) {
		prev->r_next = rl->r_next;
	} else {
		npf_rule_t *rg = rl->r_parent;
		rg->r_subset = rl->r_next;
	}
	LIST_REMOVE(rl, r_aentry);
}
297
298 /*
299 * npf_ruleset_remove: remove the dynamic rule given the rule ID.
300 */
301 int
302 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
303 {
304 npf_rule_t *rg, *prev = NULL;
305
306 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
307 return ESRCH;
308 }
309 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
310 KASSERT(rl->r_parent == rg);
311 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
312
313 /* Compare ID. On match, remove and return. */
314 if (rl->r_id == id) {
315 npf_ruleset_unlink(rl, prev);
316 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
317 return 0;
318 }
319 prev = rl;
320 }
321 return ENOENT;
322 }
323
324 /*
325 * npf_ruleset_remkey: remove the dynamic rule given the rule key.
326 */
327 int
328 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
329 const void *key, size_t len)
330 {
331 npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;
332
333 KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
334
335 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
336 return ESRCH;
337 }
338
339 /* Compare the key and find the last in the list. */
340 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
341 KASSERT(rl->r_parent == rg);
342 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
343 if (memcmp(rl->r_key, key, len) == 0) {
344 lastprev = prev;
345 rlast = rl;
346 }
347 prev = rl;
348 }
349 if (!rlast) {
350 return ENOENT;
351 }
352 npf_ruleset_unlink(rlast, lastprev);
353 LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
354 return 0;
355 }
356
357 /*
358 * npf_ruleset_list: serialise and return the dynamic rules.
359 */
360 nvlist_t *
361 npf_ruleset_list(npf_t *npf, npf_ruleset_t *rlset, const char *rname)
362 {
363 nvlist_t *rgroup;
364 npf_rule_t *rg;
365
366 KASSERT(npf_config_locked_p(npf));
367
368 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
369 return NULL;
370 }
371 if ((rgroup = nvlist_create(0)) == NULL) {
372 return NULL;
373 }
374 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
375 nvlist_t *rule;
376
377 KASSERT(rl->r_parent == rg);
378 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
379
380 rule = npf_rule_export(npf, rl);
381 if (!rule) {
382 nvlist_destroy(rgroup);
383 return NULL;
384 }
385 nvlist_append_nvlist_array(rgroup, "rules", rule);
386 nvlist_destroy(rule);
387 }
388 return rgroup;
389 }
390
391 /*
392 * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
393 * them into the G/C list.
394 */
395 int
396 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
397 {
398 npf_rule_t *rg, *rl;
399
400 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
401 return ESRCH;
402 }
403
404 rl = atomic_swap_ptr(&rg->r_subset, NULL);
405 membar_producer();
406
407 while (rl) {
408 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
409 KASSERT(rl->r_parent == rg);
410
411 LIST_REMOVE(rl, r_aentry);
412 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
413 rl = rl->r_next;
414 }
415 rlset->rs_idcnt = 0;
416 return 0;
417 }
418
419 /*
420 * npf_ruleset_gc: destroy the rules in G/C list.
421 */
422 void
423 npf_ruleset_gc(npf_ruleset_t *rlset)
424 {
425 npf_rule_t *rl;
426
427 while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
428 LIST_REMOVE(rl, r_aentry);
429 npf_rule_free(rl);
430 }
431 }
432
433 /*
434 * npf_ruleset_export: serialise and return the static rules.
435 */
436 int
437 npf_ruleset_export(npf_t *npf, const npf_ruleset_t *rlset,
438 const char *key, nvlist_t *npf_dict)
439 {
440 const unsigned nitems = rlset->rs_nitems;
441 unsigned n = 0;
442 int error = 0;
443
444 KASSERT(npf_config_locked_p(npf));
445
446 while (n < nitems) {
447 const npf_rule_t *rl = rlset->rs_rules[n];
448 const npf_natpolicy_t *natp = rl->r_natp;
449 nvlist_t *rule;
450
451 rule = npf_rule_export(npf, rl);
452 if (!rule) {
453 error = ENOMEM;
454 break;
455 }
456 if (natp && (error = npf_nat_policyexport(natp, rule)) != 0) {
457 nvlist_destroy(rule);
458 break;
459 }
460 nvlist_append_nvlist_array(npf_dict, key, rule);
461 nvlist_destroy(rule);
462 n++;
463 }
464 return error;
465 }
466
467 /*
468 * npf_ruleset_reload: prepare the new ruleset by scanning the active
469 * ruleset and: 1) sharing the dynamic rules 2) sharing NAT policies.
470 *
471 * => The active (old) ruleset should be exclusively locked.
472 */
473 void
474 npf_ruleset_reload(npf_t *npf, npf_ruleset_t *newset,
475 npf_ruleset_t *oldset, bool load)
476 {
477 npf_rule_t *rg, *rl;
478 uint64_t nid = 0;
479
480 KASSERT(npf_config_locked_p(npf));
481
482 /*
483 * Scan the dynamic rules and share (migrate) if needed.
484 */
485 LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
486 npf_rule_t *active_rgroup;
487
488 /* Look for a dynamic ruleset group with such name. */
489 active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
490 if (active_rgroup == NULL) {
491 continue;
492 }
493
494 /*
495 * ATOMICITY: Copy the head pointer of the linked-list,
496 * but do not remove the rules from the active r_subset.
497 * This is necessary because the rules are still active
498 * and therefore are accessible for inspection via the
499 * old ruleset.
500 */
501 rg->r_subset = active_rgroup->r_subset;
502
503 /*
504 * We can safely migrate to the new all-rule list and
505 * reset the parent rule, though.
506 */
507 for (rl = rg->r_subset; rl; rl = rl->r_next) {
508 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
509 LIST_REMOVE(rl, r_aentry);
510 LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
511
512 KASSERT(rl->r_parent == active_rgroup);
513 rl->r_parent = rg;
514 }
515 }
516
517 /*
518 * If performing the load of connections then NAT policies may
519 * already have translated connections associated with them and
520 * we should not share or inherit anything.
521 */
522 if (load)
523 return;
524
525 /*
526 * Scan all rules in the new ruleset and inherit the active NAT
527 * policies if they are the same. Also, assign a unique ID for
528 * each policy here.
529 */
530 LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
531 npf_natpolicy_t *np;
532 npf_rule_t *actrl;
533
534 /* Does the rule have a NAT policy associated? */
535 if ((np = rl->r_natp) == NULL) {
536 continue;
537 }
538
539 /* Does it match with any policy in the active ruleset? */
540 LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
541 if (!actrl->r_natp)
542 continue;
543 if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
544 continue;
545 if (npf_nat_cmppolicy(actrl->r_natp, np))
546 break;
547 }
548 if (!actrl) {
549 /* No: just set the ID and continue. */
550 npf_nat_setid(np, ++nid);
551 continue;
552 }
553
554 /* Yes: inherit the matching NAT policy. */
555 rl->r_natp = actrl->r_natp;
556 npf_nat_setid(rl->r_natp, ++nid);
557
558 /*
559 * Finally, mark the active rule to not destroy its NAT
560 * policy later as we inherited it (but the rule must be
561 * kept active for now). Destroy the new/unused policy.
562 */
563 actrl->r_attr |= NPF_RULE_KEEPNAT;
564 npf_nat_freepolicy(np);
565 }
566
567 /* Inherit the ID counter. */
568 newset->rs_idcnt = oldset->rs_idcnt;
569 }
570
571 /*
572 * npf_ruleset_findnat: find a NAT policy in the ruleset by a given ID.
573 */
574 npf_natpolicy_t *
575 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
576 {
577 npf_rule_t *rl;
578
579 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
580 npf_natpolicy_t *np = rl->r_natp;
581 if (np && npf_nat_getid(np) == id) {
582 return np;
583 }
584 }
585 return NULL;
586 }
587
588 /*
589 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
590 * ALG from all NAT entries using it.
591 */
592 void
593 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
594 {
595 npf_rule_t *rl;
596 npf_natpolicy_t *np;
597
598 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
599 if ((np = rl->r_natp) != NULL) {
600 npf_nat_freealg(np, alg);
601 }
602 }
603 }
604
605 /*
606 * npf_rule_alloc: allocate a rule and initialise it.
607 */
608 npf_rule_t *
609 npf_rule_alloc(npf_t *npf, const nvlist_t *rule)
610 {
611 npf_rule_t *rl;
612 const char *rname;
613 const void *key, *info;
614 size_t len;
615
616 /* Allocate a rule structure and keep the information. */
617 rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
618 info = dnvlist_get_binary(rule, "info", &rl->r_info_len, NULL, 0);
619 if (info) {
620 rl->r_info = kmem_alloc(rl->r_info_len, KM_SLEEP);
621 memcpy(rl->r_info, info, rl->r_info_len);
622 }
623 rl->r_natp = NULL;
624
625 /* Name (optional) */
626 if ((rname = dnvlist_get_string(rule, "name", NULL)) != NULL) {
627 strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
628 } else {
629 rl->r_name[0] = '\0';
630 }
631
632 /* Attributes, priority and interface ID (optional). */
633 rl->r_attr = dnvlist_get_number(rule, "attr", 0);
634 rl->r_attr &= ~NPF_RULE_PRIVMASK;
635
636 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
637 /* Priority of the dynamic rule. */
638 rl->r_priority = (int)dnvlist_get_number(rule, "prio", 0);
639 } else {
640 /* The skip-to index. No need to validate it. */
641 rl->r_skip_to = dnvlist_get_number(rule, "skip-to", 0);
642 }
643
644 /* Interface name; register and get the npf-if-id. */
645 if ((rname = dnvlist_get_string(rule, "ifname", NULL)) != NULL) {
646 if ((rl->r_ifid = npf_ifmap_register(npf, rname)) == 0) {
647 kmem_free(rl, sizeof(npf_rule_t));
648 return NULL;
649 }
650 } else {
651 rl->r_ifid = 0;
652 }
653
654 /* Key (optional). */
655 if ((key = dnvlist_get_binary(rule, "key", &len, NULL, 0)) != NULL) {
656 if (len > NPF_RULE_MAXKEYLEN) {
657 kmem_free(rl, sizeof(npf_rule_t));
658 return NULL;
659 }
660 memcpy(rl->r_key, key, len);
661 }
662 return rl;
663 }
664
/*
 * npf_rule_export: serialise a single rule into a new nvlist.
 * Returns the nvlist; caller is responsible for destroying it.
 */
static nvlist_t *
npf_rule_export(npf_t *npf, const npf_rule_t *rl)
{
	nvlist_t *rule = nvlist_create(0);
	unsigned skip_to = 0;
	npf_rproc_t *rp;

	nvlist_add_number(rule, "attr", rl->r_attr);
	nvlist_add_number(rule, "prio", rl->r_priority);
	/*
	 * Only export an explicitly-set skip-to; auto-adjusted values
	 * (SKIPTO_ADJ_FLAG set by npf_ruleset_insert) export as zero.
	 */
	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
		skip_to = rl->r_skip_to & SKIPTO_MASK;
	}
	nvlist_add_number(rule, "skip-to", skip_to);
	nvlist_add_number(rule, "code-type", rl->r_type);
	if (rl->r_code) {
		nvlist_add_binary(rule, "code", rl->r_code, rl->r_clen);
	}
	if (rl->r_ifid) {
		char ifname[IFNAMSIZ];
		npf_ifmap_copyname(npf, rl->r_ifid, ifname, sizeof(ifname));
		nvlist_add_string(rule, "ifname", ifname);
	}
	nvlist_add_number(rule, "id", rl->r_id);

	if (rl->r_name[0]) {
		nvlist_add_string(rule, "name", rl->r_name);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		/* Dynamic rules always export the full-size key. */
		nvlist_add_binary(rule, "key", rl->r_key, NPF_RULE_MAXKEYLEN);
	}
	if (rl->r_info) {
		nvlist_add_binary(rule, "info", rl->r_info, rl->r_info_len);
	}
	if ((rp = npf_rule_getrproc(rl)) != NULL) {
		const char *rname = npf_rproc_getname(rp);
		nvlist_add_string(rule, "rproc", rname);
		/* Drop the reference acquired by npf_rule_getrproc(). */
		npf_rproc_release(rp);
	}
	return rule;
}
705
706 /*
707 * npf_rule_setcode: assign filter code to the rule.
708 *
709 * => The code must be validated by the caller.
710 * => JIT compilation may be performed here.
711 */
712 void
713 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
714 {
715 KASSERT(type == NPF_CODE_BPF);
716
717 rl->r_type = type;
718 rl->r_code = code;
719 rl->r_clen = size;
720 rl->r_jcode = npf_bpf_compile(code, size);
721 }
722
723 /*
724 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
725 */
726 void
727 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
728 {
729 npf_rproc_acquire(rp);
730 rl->r_rproc = rp;
731 }
732
733 /*
734 * npf_rule_free: free the specified rule.
735 */
736 void
737 npf_rule_free(npf_rule_t *rl)
738 {
739 npf_natpolicy_t *np = rl->r_natp;
740 npf_rproc_t *rp = rl->r_rproc;
741
742 if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
743 /* Free NAT policy. */
744 npf_nat_freepolicy(np);
745 }
746 if (rp) {
747 /* Release rule procedure. */
748 npf_rproc_release(rp);
749 }
750 if (rl->r_code) {
751 /* Free byte-code. */
752 kmem_free(rl->r_code, rl->r_clen);
753 }
754 if (rl->r_jcode) {
755 /* Free JIT code. */
756 bpf_jit_freecode(rl->r_jcode);
757 }
758 if (rl->r_info) {
759 kmem_free(rl->r_info, rl->r_info_len);
760 }
761 kmem_free(rl, sizeof(npf_rule_t));
762 }
763
764 /*
765 * npf_rule_getid: return the unique ID of a rule.
766 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
767 * npf_rule_getnat: get NAT policy assigned to the rule.
768 */
769
770 uint64_t
771 npf_rule_getid(const npf_rule_t *rl)
772 {
773 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
774 return rl->r_id;
775 }
776
777 npf_rproc_t *
778 npf_rule_getrproc(const npf_rule_t *rl)
779 {
780 npf_rproc_t *rp = rl->r_rproc;
781
782 if (rp) {
783 npf_rproc_acquire(rp);
784 }
785 return rp;
786 }
787
/* npf_rule_getnat: get the NAT policy assigned to the rule (may be NULL). */
npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}
793
794 /*
795 * npf_rule_setnat: assign NAT policy to the rule and insert into the
796 * NAT policy list in the ruleset.
797 */
798 void
799 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
800 {
801 KASSERT(rl->r_natp == NULL);
802 rl->r_natp = np;
803 }
804
805 /*
806 * npf_rule_inspect: match the interface, direction and run the filter code.
807 * Returns true if rule matches and false otherwise.
808 */
809 static inline bool
810 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
811 const int di_mask, const u_int ifid)
812 {
813 /* Match the interface. */
814 if (rl->r_ifid && rl->r_ifid != ifid) {
815 return false;
816 }
817
818 /* Match the direction. */
819 if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
820 if ((rl->r_attr & di_mask) == 0)
821 return false;
822 }
823
824 /* Any code? */
825 if (!rl->r_code) {
826 KASSERT(rl->r_jcode == NULL);
827 return true;
828 }
829 KASSERT(rl->r_type == NPF_CODE_BPF);
830 return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
831 }
832
833 /*
834 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
835 * This is only for the dynamic rules. Subrules cannot have nested rules.
836 */
837 static inline npf_rule_t *
838 npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
839 const int di_mask, const u_int ifid)
840 {
841 npf_rule_t *final_rl = NULL, *rl;
842
843 KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));
844
845 for (rl = rg->r_subset; rl; rl = rl->r_next) {
846 KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
847 if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
848 continue;
849 }
850 if (rl->r_attr & NPF_RULE_FINAL) {
851 return rl;
852 }
853 final_rl = rl;
854 }
855 return final_rl;
856 }
857
858 /*
859 * npf_ruleset_inspect: inspect the packet against the given ruleset.
860 *
861 * Loop through the rules in the set and run the byte-code of each rule
862 * against the packet (nbuf chain). If sub-ruleset is found, inspect it.
863 */
864 npf_rule_t *
865 npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
866 const int di, const int layer)
867 {
868 nbuf_t *nbuf = npc->npc_nbuf;
869 const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
870 const u_int nitems = rlset->rs_nitems;
871 const u_int ifid = nbuf->nb_ifid;
872 npf_rule_t *final_rl = NULL;
873 bpf_args_t bc_args;
874 u_int n = 0;
875
876 KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
877
878 /*
879 * Prepare the external memory store and the arguments for
880 * the BPF programs to be executed. Reset mbuf before taking
881 * any pointers for the BPF.
882 */
883 uint32_t bc_words[NPF_BPF_NWORDS];
884
885 nbuf_reset(nbuf);
886 npf_bpf_prepare(npc, &bc_args, bc_words);
887
888 while (n < nitems) {
889 npf_rule_t *rl = rlset->rs_rules[n];
890 const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
891 const uint32_t attr = rl->r_attr;
892
893 KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
894 KASSERT(n < skip_to);
895
896 /* Group is a barrier: return a matching if found any. */
897 if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
898 break;
899 }
900
901 /* Main inspection of the rule. */
902 if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
903 n = skip_to;
904 continue;
905 }
906
907 if (NPF_DYNAMIC_GROUP_P(attr)) {
908 /*
909 * If this is a dynamic rule, re-inspect the subrules.
910 * If it has any matching rule, then it is final.
911 */
912 rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
913 if (rl != NULL) {
914 final_rl = rl;
915 break;
916 }
917 } else if ((attr & NPF_RULE_GROUP) == 0) {
918 /*
919 * Groups themselves are not matching.
920 */
921 final_rl = rl;
922 }
923
924 /* Set the matching rule and check for "final". */
925 if (attr & NPF_RULE_FINAL) {
926 break;
927 }
928 n++;
929 }
930
931 KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
932 return final_rl;
933 }
934
935 /*
936 * npf_rule_conclude: return decision and the flags for conclusion.
937 *
938 * => Returns ENETUNREACH if "block" and 0 if "pass".
939 */
940 int
941 npf_rule_conclude(const npf_rule_t *rl, npf_match_info_t *mi)
942 {
943 /* If not passing - drop the packet. */
944 mi->mi_retfl = rl->r_attr;
945 mi->mi_rid = rl->r_id;
946 return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
947 }
948
949
950 #if defined(DDB) || defined(_NPF_TESTING)
951
/*
 * npf_ruleset_dump: print all dynamic rule groups (name, rule IDs and
 * keys) of the active config ruleset to the console.  DDB/testing only.
 *
 * NOTE(review): the 'name' parameter is currently unused — every
 * dynamic group is dumped regardless; confirm whether per-group
 * filtering was intended.
 */
void
npf_ruleset_dump(npf_t *npf, const char *name)
{
	npf_ruleset_t *rlset = npf_config_ruleset(npf);
	npf_rule_t *rg, *rl;

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		printf("ruleset '%s':\n", rg->r_name);
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			printf("\tid %"PRIu64", key: ", rl->r_id);
			/* Key bytes printed as bare hex nibble pairs. */
			for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
				printf("%x", rl->r_key[i]);
			printf("\n");
		}
	}
}
968
969 #endif
970