npf_ruleset.c revision 1.14.2.3 1 /* $NetBSD: npf_ruleset.c,v 1.14.2.3 2014/08/20 00:04:35 tls Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF ruleset module.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.14.2.3 2014/08/20 00:04:35 tls Exp $");
38
39 #include <sys/param.h>
40 #include <sys/types.h>
41
42 #include <sys/atomic.h>
43 #include <sys/kmem.h>
44 #include <sys/queue.h>
45 #include <sys/mbuf.h>
46 #include <sys/types.h>
47
48 #include <net/bpf.h>
49 #include <net/bpfjit.h>
50 #include <net/pfil.h>
51 #include <net/if.h>
52
53 #include "npf_impl.h"
54
struct npf_ruleset {
	/*
	 * - List of all rules (rs_all).
	 * - Dynamic (i.e. named) group rules only (rs_dynamic).
	 * - G/C list of unlinked rules awaiting npf_ruleset_gc() (rs_gc).
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter; incremented for each dynamic rule added. */
	uint64_t		rs_idcnt;

	/* Number of array slots (capacity) and active rules (in use). */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules (C99 flexible array member). */
	npf_rule_t *		rs_rules[];
};
75
struct npf_rule {
	/* Attributes, interface ID (0 = any) and skip-to slot index. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/*
	 * Filter code to process, if any: code type, JIT-compiled code
	 * and the raw byte-code with its length.
	 */
	int			r_type;
	bpfjit_func_t		r_jcode;
	void *			r_code;
	u_int			r_clen;

	/* NAT policy (optional) and rule procedure (optional). */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry in the subset and the parent rule (group).
	 * The anonymous unions (C11) overlay the mutually-exclusive roles.
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID, name and the optional key (for lookup by remkey). */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary info blob (opaque to kernel). */
	LIST_ENTRY(npf_rule)	r_aentry;
	prop_data_t		r_info;
};
117
/*
 * Skip-to values auto-assigned by npf_ruleset_insert() are tagged with
 * SKIPTO_ADJ_FLAG (top bit) so that npf_rule_export() can report zero
 * for them instead of the synthesized index.
 */
#define	SKIPTO_ADJ_FLAG		(1U << 31)
#define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)

static int	npf_rule_export(const npf_ruleset_t *,
    const npf_rule_t *, prop_dictionary_t);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)

/*
 * NOTE(review): these predicates assume NPF_DYNAMIC_GROUP is the union
 * of the group and dynamic attribute bits (defined in npf headers) —
 * a group-p match requires both bits, a rule-p match requires only the
 * dynamic bit.  Confirm against the npf.h definitions.
 */
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
134
135 npf_ruleset_t *
136 npf_ruleset_create(size_t slots)
137 {
138 size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
139 npf_ruleset_t *rlset;
140
141 rlset = kmem_zalloc(len, KM_SLEEP);
142 LIST_INIT(&rlset->rs_dynamic);
143 LIST_INIT(&rlset->rs_all);
144 LIST_INIT(&rlset->rs_gc);
145 rlset->rs_slots = slots;
146
147 return rlset;
148 }
149
/*
 * npf_ruleset_unlink: remove the rule from all ruleset lists it is on:
 * the dynamic-group list (if it is a group), its parent group's subset
 * (if it is a dynamic rule) and the all-rules list.  The rule itself
 * is not freed.
 */
static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}
162
163 void
164 npf_ruleset_destroy(npf_ruleset_t *rlset)
165 {
166 size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
167 npf_rule_t *rl;
168
169 while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
170 npf_ruleset_unlink(rlset, rl);
171 npf_rule_free(rl);
172 }
173 KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
174 KASSERT(LIST_EMPTY(&rlset->rs_gc));
175 kmem_free(rlset, len);
176 }
177
178 /*
179 * npf_ruleset_insert: insert the rule into the specified ruleset.
180 */
/*
 * npf_ruleset_insert: insert the rule into the specified ruleset at the
 * next free slot.  The caller guarantees capacity (asserted below).
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	} else {
		/* Not a group: it cannot carry the dynamic bit alone. */
		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
		rl->r_attr &= ~NPF_RULE_DYNAMIC;
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;

	/*
	 * If the skip-to index does not point past this rule, synthesize
	 * one pointing at the next slot and tag it with SKIPTO_ADJ_FLAG
	 * so export knows the value was auto-generated (reports 0).
	 */
	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
	}
}
203
204 static npf_rule_t *
205 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
206 {
207 npf_rule_t *rl;
208
209 KASSERT(npf_config_locked_p());
210
211 LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
212 KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
213 if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
214 break;
215 }
216 return rl;
217 }
218
/*
 * npf_ruleset_add: insert a dynamic rule into the named dynamic group,
 * ordered by priority.  Assigns the rule a unique ID.
 *
 * => Returns ESRCH if the group does not exist, EINVAL if the rule is
 *    not marked dynamic, 0 on success.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		/* Insert before the first rule of equal/lower priority. */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		/*
		 * Insert after all rules of equal priority (note the
		 * strict '<'), keeping insertion order stable.
		 */
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}
275
276 int
277 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
278 {
279 npf_rule_t *rg, *rl;
280
281 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
282 return ESRCH;
283 }
284 TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
285 KASSERT(rl->r_parent == rg);
286
287 /* Compare ID. On match, remove and return. */
288 if (rl->r_id == id) {
289 npf_ruleset_unlink(rlset, rl);
290 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
291 return 0;
292 }
293 }
294 return ENOENT;
295 }
296
/*
 * npf_ruleset_remkey: unlink the most recently added rule whose key
 * matches, moving it onto the G/C list for deferred freeing.
 *
 * => Returns ESRCH if the group is missing, ENOENT if no key matches.
 */
int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rl;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/*
	 * Find the last in the list: iterate from the tail so that the
	 * most recently appended rule with this key is removed first.
	 */
	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
		KASSERT(rl->r_parent == rg);

		/*
		 * Compare the key.  On match, remove and return.  Note:
		 * only the first 'len' bytes are compared; r_key was
		 * zero-padded at allocation time.
		 */
		if (memcmp(rl->r_key, key, len) == 0) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}
322
323 prop_dictionary_t
324 npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
325 {
326 prop_dictionary_t rgdict;
327 prop_array_t rules;
328 npf_rule_t *rg, *rl;
329
330 KASSERT(npf_config_locked_p());
331
332 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
333 return NULL;
334 }
335 if ((rgdict = prop_dictionary_create()) == NULL) {
336 return NULL;
337 }
338 if ((rules = prop_array_create()) == NULL) {
339 prop_object_release(rgdict);
340 return NULL;
341 }
342
343 TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
344 prop_dictionary_t rldict;
345
346 rldict = prop_dictionary_create();
347 KASSERT(rl->r_parent == rg);
348
349 if (npf_rule_export(rlset, rl, rldict)) {
350 prop_object_release(rldict);
351 prop_object_release(rules);
352 return NULL;
353 }
354 prop_array_add(rules, rldict);
355 prop_object_release(rldict);
356 }
357
358 if (!prop_dictionary_set(rgdict, "rules", rules)) {
359 prop_object_release(rgdict);
360 rgdict = NULL;
361 }
362 prop_object_release(rules);
363 return rgdict;
364 }
365
366 int
367 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
368 {
369 npf_rule_t *rg, *rl;
370
371 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
372 return ESRCH;
373 }
374 while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
375 KASSERT(rl->r_parent == rg);
376 npf_ruleset_unlink(rlset, rl);
377 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
378 }
379 return 0;
380 }
381
382 int
383 npf_ruleset_export(const npf_ruleset_t *rlset, prop_array_t rules)
384 {
385 const u_int nitems = rlset->rs_nitems;
386 int error = 0;
387 u_int n = 0;
388
389 KASSERT(npf_config_locked_p());
390
391 while (n < nitems) {
392 const npf_rule_t *rl = rlset->rs_rules[n];
393 const npf_natpolicy_t *natp = rl->r_natp;
394 prop_dictionary_t rldict;
395
396 rldict = prop_dictionary_create();
397 if ((error = npf_rule_export(rlset, rl, rldict)) != 0) {
398 prop_object_release(rldict);
399 break;
400 }
401 if (natp && (error = npf_nat_policyexport(natp, rldict)) != 0) {
402 prop_object_release(rldict);
403 break;
404 }
405 prop_array_add(rules, rldict);
406 prop_object_release(rldict);
407 n++;
408 }
409 return error;
410 }
411
412 void
413 npf_ruleset_gc(npf_ruleset_t *rlset)
414 {
415 npf_rule_t *rl;
416
417 while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
418 LIST_REMOVE(rl, r_aentry);
419 npf_rule_free(rl);
420 }
421 }
422
423 /*
424 * npf_ruleset_cmpnat: find a matching NAT policy in the ruleset.
425 */
426 static inline npf_rule_t *
427 npf_ruleset_cmpnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
428 {
429 npf_rule_t *rl;
430
431 /* Find a matching NAT policy in the old ruleset. */
432 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
433 if (rl->r_natp && npf_nat_cmppolicy(rl->r_natp, mnp))
434 break;
435 }
436 return rl;
437 }
438
439 /*
440 * npf_ruleset_reload: prepare the new ruleset by scanning the active
441 * ruleset and 1) sharing the dynamic rules 2) sharing NAT policies.
442 *
443 * => The active (old) ruleset should be exclusively locked.
444 */
/*
 * npf_ruleset_reload: prepare the new ruleset by scanning the active
 * ruleset and 1) sharing the dynamic rules 2) sharing NAT policies.
 *
 * => The active (old) ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *newset, npf_ruleset_t *oldset)
{
	npf_rule_t *rg, *rl;
	uint64_t nid = 0;

	KASSERT(npf_config_locked_p());

	/*
	 * Scan the dynamic rules and share (migrate) if needed.
	 */
	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
		npf_rule_t *actrg;

		/* Look for a dynamic ruleset group with such name. */
		actrg = npf_ruleset_lookup(oldset, rg->r_name);
		if (actrg == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure.  This is necessary because
		 * the rules are still active and therefore accessible for
		 * inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &actrg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			/*
			 * We can safely migrate to the new all-rule list
			 * and re-set the parent rule, though.
			 */
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/*
	 * Scan all rules in the new ruleset and share NAT policies.
	 * Also, assign a unique ID for each policy here.
	 */
	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
		npf_natpolicy_t *np;
		npf_rule_t *actrl;

		/* Does the rule have a NAT policy associated? */
		if ((np = rl->r_natp) == NULL) {
			continue;
		}

		/* Does it match with any policy in the active ruleset? */
		if ((actrl = npf_ruleset_cmpnat(oldset, np)) == NULL) {
			/* No: keep the new policy, just assign an ID. */
			npf_nat_setid(np, ++nid);
			continue;
		}

		/*
		 * Inherit the matching NAT policy and check other ones
		 * in the new ruleset for sharing the portmap.
		 */
		rl->r_natp = actrl->r_natp;
		npf_ruleset_sharepm(newset, rl->r_natp);
		npf_nat_setid(rl->r_natp, ++nid);

		/*
		 * Finally, mark the active rule to not destroy its NAT
		 * policy later as we inherited it (but the rule must be
		 * kept active for now).  Destroy the new/unused policy.
		 */
		actrl->r_attr |= NPF_RULE_KEEPNAT;
		npf_nat_freepolicy(np);
	}

	/* Inherit the ID counter so dynamic rule IDs stay unique. */
	newset->rs_idcnt = oldset->rs_idcnt;
}
521
/*
 * npf_ruleset_sharepm: scan the ruleset for a NAT policy whose portmap
 * can be shared with 'mnp', and share it (via npf_nat_sharepm).
 *
 * => Returns the rule whose policy was shared, or NULL if none.
 */
npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		/*
		 * NAT policy might not yet be set during the creation of
		 * the ruleset (in such case, rule is for our policy), or
		 * policies might be equal due to rule exchange on reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	/* NULL when the loop exhausted without a share. */
	return rl;
}
543
544 npf_natpolicy_t *
545 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
546 {
547 npf_rule_t *rl;
548
549 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
550 npf_natpolicy_t *np = rl->r_natp;
551 if (np && npf_nat_getid(np) == id) {
552 return np;
553 }
554 }
555 return NULL;
556 }
557
558 /*
559 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
560 * ALG from all NAT entries using it.
561 */
562 void
563 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
564 {
565 npf_rule_t *rl;
566 npf_natpolicy_t *np;
567
568 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
569 if ((np = rl->r_natp) != NULL) {
570 npf_nat_freealg(np, alg);
571 }
572 }
573 }
574
575 /*
576 * npf_rule_alloc: allocate a rule and initialise it.
577 */
/*
 * npf_rule_alloc: allocate a rule and initialise it from the given
 * property dictionary (name, attributes, priority, interface, skip-to,
 * key and auxiliary info are all optional).
 *
 * => Returns NULL if the interface cannot be registered or the key is
 *    oversized; otherwise a zero-initialised rule owned by the caller.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;
	prop_data_t d;

	/* Allocate a rule structure (zeroed; KM_SLEEP cannot fail). */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attr", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "prio", &rl->r_priority);
	/* Strip private bits: userland must not set them. */
	rl->r_attr &= ~NPF_RULE_PRIVMASK;

	if (prop_dictionary_get_cstring_nocopy(rldict, "ifname", &rname)) {
		if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
	} else {
		rl->r_ifid = 0;
	}

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			/* Oversized key: reject the whole rule. */
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	/* Auxiliary info blob (optional); copied, released in rule_free. */
	if ((d = prop_dictionary_get(rldict, "info")) != NULL) {
		rl->r_info = prop_data_copy(d);
	}
	return rl;
}
632
/*
 * npf_rule_export: serialise one rule into the given dictionary.
 * Inverse of npf_rule_alloc() plus the generated fields (id, code).
 *
 * => Always returns 0 (kept int for symmetry with other exporters).
 */
static int
npf_rule_export(const npf_ruleset_t *rlset, const npf_rule_t *rl,
    prop_dictionary_t rldict)
{
	u_int skip_to = 0;
	prop_data_t d;

	prop_dictionary_set_uint32(rldict, "attr", rl->r_attr);
	prop_dictionary_set_int32(rldict, "prio", rl->r_priority);
	/*
	 * Only report skip-to values the user originally supplied;
	 * auto-adjusted ones (SKIPTO_ADJ_FLAG set) export as zero.
	 */
	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
		skip_to = rl->r_skip_to & SKIPTO_MASK;
	}
	prop_dictionary_set_uint32(rldict, "skip-to", skip_to);
	prop_dictionary_set_int32(rldict, "code-type", rl->r_type);
	if (rl->r_code) {
		d = prop_data_create_data(rl->r_code, rl->r_clen);
		prop_dictionary_set_and_rel(rldict, "code", d);
	}

	if (rl->r_ifid) {
		const char *ifname = npf_ifmap_getname(rl->r_ifid);
		prop_dictionary_set_cstring(rldict, "ifname", ifname);
	}
	prop_dictionary_set_uint64(rldict, "id", rl->r_id);

	if (rl->r_name[0]) {
		prop_dictionary_set_cstring(rldict, "name", rl->r_name);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		/* Export the full (zero-padded) key buffer. */
		d = prop_data_create_data(rl->r_key, NPF_RULE_MAXKEYLEN);
		prop_dictionary_set_and_rel(rldict, "key", d);
	}
	if (rl->r_info) {
		prop_dictionary_set(rldict, "info", rl->r_info);
	}
	return 0;
}
670
671 /*
672 * npf_rule_setcode: assign filter code to the rule.
673 *
674 * => The code must be validated by the caller.
675 * => JIT compilation may be performed here.
676 */
/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here (r_jcode may stay NULL if
 *    npf_bpf_compile fails or JIT is unavailable).
 * => Takes ownership of 'code'; freed in npf_rule_free().
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	KASSERT(type == NPF_CODE_BPF);

	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
	rl->r_jcode = npf_bpf_compile(code, size);
}
687
688 /*
689 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
690 */
/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 * The reference is dropped in npf_rule_free().
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}
697
698 /*
699 * npf_rule_free: free the specified rule.
700 */
/*
 * npf_rule_free: free the specified rule and all resources it owns:
 * NAT policy (unless inherited on reload), rproc reference, byte-code,
 * JIT code and the auxiliary info blob.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
		/* Free NAT policy (not inherited by a new ruleset). */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		bpf_jit_freecode(rl->r_jcode);
	}
	if (rl->r_info) {
		prop_object_release(rl->r_info);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}
728
729 /*
730 * npf_rule_getid: return the unique ID of a rule.
731 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
732 * npf_rule_getnat: get NAT policy assigned to the rule.
733 */
734
/*
 * npf_rule_getid: return the unique ID of a rule.
 * Only dynamic rules carry an ID (asserted).
 */
uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}
741
742 npf_rproc_t *
743 npf_rule_getrproc(const npf_rule_t *rl)
744 {
745 npf_rproc_t *rp = rl->r_rproc;
746
747 if (rp) {
748 npf_rproc_acquire(rp);
749 }
750 return rp;
751 }
752
/*
 * npf_rule_getnat: get the NAT policy assigned to the rule (or NULL).
 * No reference counting - the rule retains ownership.
 */
npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}
758
759 /*
760 * npf_rule_setnat: assign NAT policy to the rule and insert into the
761 * NAT policy list in the ruleset.
762 */
/*
 * npf_rule_setnat: assign NAT policy to the rule.  The rule must not
 * already have one (asserted); the policy is freed with the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}
769
770 /*
771 * npf_rule_inspect: match the interface, direction and run the filter code.
772 * Returns true if rule matches and false otherwise.
773 */
/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if rule matches and false otherwise.
 */
static inline bool
npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
    const int di_mask, const u_int ifid)
{
	/* Match the interface (r_ifid == 0 means "any interface"). */
	if (rl->r_ifid && rl->r_ifid != ifid) {
		return false;
	}

	/*
	 * Match the direction.  Both direction bits set means the rule
	 * applies to either direction, so skip the check entirely.
	 */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Any code?  A rule without code matches unconditionally. */
	if (!rl->r_code) {
		KASSERT(rl->r_jcode == NULL);
		return true;
	}
	KASSERT(rl->r_type == NPF_CODE_BPF);
	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
}
797
798 /*
799 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
800 * This is only for the dynamic rules. Subrules cannot have nested rules.
801 */
802 static npf_rule_t *
803 npf_rule_reinspect(const npf_rule_t *drl, bpf_args_t *bc_args,
804 const int di_mask, const u_int ifid)
805 {
806 npf_rule_t *final_rl = NULL, *rl;
807
808 KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));
809
810 TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
811 if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
812 continue;
813 }
814 if (rl->r_attr & NPF_RULE_FINAL) {
815 return rl;
816 }
817 final_rl = rl;
818 }
819 return final_rl;
820 }
821
822 /*
823 * npf_ruleset_inspect: inspect the packet against the given ruleset.
824 *
825 * Loop through the rules in the set and run the byte-code of each rule
826 * against the packet (nbuf chain). If sub-ruleset is found, inspect it.
827 */
/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 *
 * => Returns the matching rule or NULL; the caller concludes via
 *    npf_rule_conclude().
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    const int di, const int layer)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	/* Exactly one of PFIL_IN/PFIL_OUT must be set. */
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	/*
	 * Prepare the external memory store and the arguments for
	 * the BPF programs to be executed.
	 */
	uint32_t bc_words[NPF_BPF_NWORDS];
	npf_bpf_prepare(npc, &bc_args, bc_words);

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		/* skip_to always points past the current rule. */
		KASSERT(n < skip_to);

		/* Group is a barrier: return a matching if found any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule; on mismatch, skip ahead. */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
896
897 /*
898 * npf_rule_conclude: return decision and the flags for conclusion.
899 *
900 * => Returns ENETUNREACH if "block" and 0 if "pass".
901 */
902 int
903 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
904 {
905 /* If not passing - drop the packet. */
906 *retfl = rl->r_attr;
907 return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
908 }
909