/*	$NetBSD: npf_ruleset.c,v 1.4 2010/12/18 01:07:25 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */
36 #ifdef _KERNEL
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.4 2010/12/18 01:07:25 rmind Exp $");
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42
43 #include <sys/atomic.h>
44 #include <sys/kmem.h>
45 #include <sys/pool.h>
46 #include <sys/queue.h>
47 #include <sys/types.h>
48
49 #include <net/pfil.h>
50 #include <net/if.h>
51 #endif
52
53 #include "npf_ncode.h"
54 #include "npf_impl.h"
55
/* Ruleset structure: the rule queue and the default rule (kept separately). */
struct npf_ruleset {
	TAILQ_HEAD(, npf_rule)		rs_queue;	/* rules, priority-ordered */
	npf_rule_t *			rs_default;	/* applied if nothing matched */
};
61
/*
 * Rule hook entry: a callback with its argument, invoked for a packet
 * which matches the rule (see npf_rule_apply()).
 */
struct npf_hook {
	void			(*hk_fn)(npf_cache_t *, nbuf_t *, void *);
	void *			hk_arg;
	LIST_ENTRY(npf_hook)	hk_entry;	/* linkage on npf_rule::r_hooks */
};
68
/*
 * Rule processing structure: reference-counted set of packet
 * normalization and logging options (see npf_rproc_run()).
 */
struct npf_rproc {
	/* Reference count (adjusted atomically). */
	u_int			rp_refcnt;
	/* Normalization options. */
	bool			rp_rnd_ipid;	/* randomize IP ID */
	bool			rp_no_df;	/* clear the IP_DF flag */
	u_int			rp_minttl;	/* enforce minimum IP TTL */
	u_int			rp_maxmss;	/* clamp TCP MSS */
	/* Logging interface ID; zero means no logging. */
	u_int			rp_log_ifid;
};
81
/* Rule structure. */
struct npf_rule {
	TAILQ_ENTRY(npf_rule)	r_entry;	/* linkage on npf_ruleset */
	/* Optional: sub-ruleset, NAT policy. */
	npf_ruleset_t		r_subset;
	npf_natpolicy_t *	r_natp;
	/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
	u_int			r_priority;
	/* N-code to process (NULL means "match any"). */
	void *			r_ncode;
	size_t			r_nc_size;
	/* Attributes of this rule (NPF_RULE_* flags). */
	uint32_t		r_attr;
	/* Interface index to match; zero means any interface. */
	u_int			r_ifid;
	/* Hit counter (updated atomically when NPF_RULE_COUNT is set). */
	u_long			r_hitcount;
	/* Rule processing data (NULL if no log/normalize options). */
	npf_rproc_t *		r_rproc;
	/* List of hooks to process on match, guarded by r_hooks_lock. */
	kmutex_t		r_hooks_lock;
	LIST_HEAD(, npf_hook)	r_hooks;
};
105
106 npf_ruleset_t *
107 npf_ruleset_create(void)
108 {
109 npf_ruleset_t *rlset;
110
111 rlset = kmem_zalloc(sizeof(npf_ruleset_t), KM_SLEEP);
112 TAILQ_INIT(&rlset->rs_queue);
113 return rlset;
114 }
115
116 void
117 npf_ruleset_destroy(npf_ruleset_t *rlset)
118 {
119 npf_rule_t *rl;
120
121 while ((rl = TAILQ_FIRST(&rlset->rs_queue)) != NULL) {
122 TAILQ_REMOVE(&rlset->rs_queue, rl, r_entry);
123 npf_rule_free(rl);
124 }
125 kmem_free(rlset, sizeof(npf_ruleset_t));
126 }
127
128 /*
129 * npf_ruleset_insert: insert the rule into the specified ruleset.
130 *
131 * Note: multiple rules at the same priority are allowed.
132 */
133 void
134 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
135 {
136 npf_rule_t *it;
137
138 if (rl->r_attr & NPF_RULE_DEFAULT) {
139 rlset->rs_default = rl;
140 return;
141 }
142 TAILQ_FOREACH(it, &rlset->rs_queue, r_entry) {
143 /* Rule priority: (highest) 0, 1, 2, 4 ... n (lowest). */
144 if (it->r_priority > rl->r_priority)
145 break;
146 }
147 if (it == NULL) {
148 TAILQ_INSERT_TAIL(&rlset->rs_queue, rl, r_entry);
149 } else {
150 TAILQ_INSERT_BEFORE(it, rl, r_entry);
151 }
152 }
153
154 /*
155 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
156 */
157 npf_rule_t *
158 npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
159 {
160 npf_rule_t *rl;
161
162 /* Find a matching NAT policy in the old ruleset. */
163 TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
164 if (npf_nat_matchpolicy(rl->r_natp, mnp))
165 break;
166 }
167 return rl;
168 }
169
170 /*
171 * npf_ruleset_natreload: minimum reload of NAT policies by maching
172 * two (active and new) NAT rulesets.
173 *
174 * => Active ruleset should be exclusively locked.
175 */
176 void
177 npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
178 {
179 npf_natpolicy_t *np, *anp;
180 npf_rule_t *rl, *arl;
181
182 KASSERT(npf_core_locked());
183
184 /* Scan a new NAT ruleset against NAT policies in old ruleset. */
185 TAILQ_FOREACH(rl, &nrlset->rs_queue, r_entry) {
186 np = rl->r_natp;
187 arl = npf_ruleset_matchnat(arlset, np);
188 if (arl == NULL) {
189 continue;
190 }
191 /* On match - we exchange NAT policies. */
192 anp = arl->r_natp;
193 rl->r_natp = anp;
194 arl->r_natp = np;
195 }
196 }
197
198 npf_rproc_t *
199 npf_rproc_create(prop_dictionary_t rpdict)
200 {
201 npf_rproc_t *rp;
202 prop_object_t obj;
203
204 rp = kmem_alloc(sizeof(npf_rproc_t), KM_SLEEP);
205 rp->rp_refcnt = 1;
206
207 /* Logging interface ID (integer). */
208 obj = prop_dictionary_get(rpdict, "log-interface");
209 rp->rp_log_ifid = prop_number_integer_value(obj);
210
211 /* Randomize IP ID (bool). */
212 obj = prop_dictionary_get(rpdict, "randomize-id");
213 rp->rp_rnd_ipid = prop_bool_true(obj);
214
215 /* IP_DF flag cleansing (bool). */
216 obj = prop_dictionary_get(rpdict, "no-df");
217 rp->rp_no_df = prop_bool_true(obj);
218
219 /* Minimum IP TTL (integer). */
220 obj = prop_dictionary_get(rpdict, "min-ttl");
221 rp->rp_minttl = prop_number_integer_value(obj);
222
223 /* Maximum TCP MSS (integer). */
224 obj = prop_dictionary_get(rpdict, "max-mss");
225 rp->rp_maxmss = prop_number_integer_value(obj);
226
227 return rp;
228 }
229
230 npf_rproc_t *
231 npf_rproc_return(npf_rule_t *rl)
232 {
233 npf_rproc_t *rp = rl->r_rproc;
234
235 if (rp) {
236 atomic_inc_uint(&rp->rp_refcnt);
237 }
238 return rp;
239 }
240
241 void
242 npf_rproc_release(npf_rproc_t *rp)
243 {
244
245 /* Destroy on last reference. */
246 if (atomic_dec_uint_nv(&rp->rp_refcnt) != 0) {
247 return;
248 }
249 kmem_free(rp, sizeof(npf_rproc_t));
250 }
251
252 void
253 npf_rproc_run(npf_cache_t *npc, nbuf_t *nbuf, npf_rproc_t *rp)
254 {
255
256 KASSERT(rp->rp_refcnt > 0);
257
258 /* Normalize the packet, if required. */
259 (void)npf_normalize(npc, nbuf,
260 rp->rp_rnd_ipid, rp->rp_no_df, rp->rp_minttl, rp->rp_maxmss);
261
262 /* Log packet, if required. */
263 if (rp->rp_log_ifid) {
264 npf_log_packet(npc, nbuf, rp->rp_log_ifid);
265 }
266
267 }
268
269 /*
270 * npf_rule_alloc: allocate a rule and copy ncode from user-space.
271 *
272 * => N-code should be validated by the caller.
273 */
274 npf_rule_t *
275 npf_rule_alloc(prop_dictionary_t rldict, void *nc, size_t nc_size)
276 {
277 npf_rule_t *rl;
278 prop_object_t obj;
279 int errat;
280
281 /* Allocate a rule structure. */
282 rl = kmem_alloc(sizeof(npf_rule_t), KM_SLEEP);
283 TAILQ_INIT(&rl->r_subset.rs_queue);
284 mutex_init(&rl->r_hooks_lock, MUTEX_DEFAULT, IPL_SOFTNET);
285 LIST_INIT(&rl->r_hooks);
286 rl->r_hitcount = 0;
287 rl->r_natp = NULL;
288
289 /* N-code. */
290 KASSERT(nc == NULL || npf_ncode_validate(nc, nc_size, &errat) == 0);
291 rl->r_ncode = nc;
292 rl->r_nc_size = nc_size;
293
294 /* Attributes (integer). */
295 obj = prop_dictionary_get(rldict, "attributes");
296 rl->r_attr = prop_number_integer_value(obj);
297
298 /* Priority (integer). */
299 obj = prop_dictionary_get(rldict, "priority");
300 rl->r_priority = prop_number_integer_value(obj);
301
302 /* Interface ID (integer). */
303 obj = prop_dictionary_get(rldict, "interface");
304 rl->r_ifid = prop_number_integer_value(obj);
305
306 /* Create rule processing structure, if any. */
307 if (rl->r_attr & (NPF_RULE_LOG | NPF_RULE_NORMALIZE)) {
308 rl->r_rproc = npf_rproc_create(rldict);
309 } else {
310 rl->r_rproc = NULL;
311 }
312 return rl;
313 }
314
315 /*
316 * npf_rule_free: free the specified rule.
317 */
318 void
319 npf_rule_free(npf_rule_t *rl)
320 {
321 npf_natpolicy_t *np = rl->r_natp;
322 npf_rproc_t *rp = rl->r_rproc;
323
324 if (np) {
325 /* Free NAT policy. */
326 npf_nat_freepolicy(np);
327 }
328 if (rp) {
329 /* Release/free rule processing structure. */
330 npf_rproc_release(rp);
331 }
332 if (rl->r_ncode) {
333 /* Free n-code. */
334 npf_ncode_free(rl->r_ncode, rl->r_nc_size);
335 }
336 mutex_destroy(&rl->r_hooks_lock);
337 kmem_free(rl, sizeof(npf_rule_t));
338 }
339
340 /*
341 * npf_rule_subset: return sub-ruleset, if any.
342 * npf_rule_getnat: get NAT policy assigned to the rule.
343 */
344
345 npf_ruleset_t *
346 npf_rule_subset(npf_rule_t *rl)
347 {
348 return &rl->r_subset;
349 }
350
351 npf_natpolicy_t *
352 npf_rule_getnat(const npf_rule_t *rl)
353 {
354 return rl->r_natp;
355 }
356
357 /*
358 * npf_rule_setnat: assign NAT policy to the rule and insert into the
359 * NAT policy list in the ruleset.
360 */
361 void
362 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
363 {
364
365 KASSERT(rl->r_natp == NULL);
366 rl->r_natp = np;
367 }
368
369 /*
370 * npf_hook_register: register action hook in the rule.
371 */
372 npf_hook_t *
373 npf_hook_register(npf_rule_t *rl,
374 void (*fn)(npf_cache_t *, nbuf_t *, void *), void *arg)
375 {
376 npf_hook_t *hk;
377
378 hk = kmem_alloc(sizeof(npf_hook_t), KM_SLEEP);
379 if (hk != NULL) {
380 hk->hk_fn = fn;
381 hk->hk_arg = arg;
382 mutex_enter(&rl->r_hooks_lock);
383 LIST_INSERT_HEAD(&rl->r_hooks, hk, hk_entry);
384 mutex_exit(&rl->r_hooks_lock);
385 }
386 return hk;
387 }
388
/*
 * npf_hook_unregister: unregister a specified hook and free it.
 *
 * => Hook should have been registered in the rule.
 */
void
npf_hook_unregister(npf_rule_t *rl, npf_hook_t *hk)
{

	/* Unlink under the rule's hook lock, then free outside of it. */
	mutex_enter(&rl->r_hooks_lock);
	LIST_REMOVE(hk, hk_entry);
	mutex_exit(&rl->r_hooks_lock);
	kmem_free(hk, sizeof(npf_hook_t));
}
403
404 /*
405 * npf_ruleset_match: inspect the packet against the given ruleset.
406 *
407 * Loop for each rule in the set and run n-code processor of each rule
408 * against the packet (nbuf chain).
409 */
410 npf_rule_t *
411 npf_ruleset_match(npf_ruleset_t *rlset, npf_cache_t *npc, nbuf_t *nbuf,
412 struct ifnet *ifp, const int di, const int layer)
413 {
414 npf_rule_t *final_rl = NULL, *rl;
415
416 KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
417
418 TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
419 KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
420
421 /* Match the interface. */
422 if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
423 continue;
424 }
425 /* Match the direction. */
426 if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
427 const int di_mask =
428 (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
429
430 if ((rl->r_attr & di_mask) == 0)
431 continue;
432 }
433 /* Process the n-code, if any. */
434 const void *nc = rl->r_ncode;
435 if (nc && npf_ncode_process(npc, nc, nbuf, layer)) {
436 continue;
437 }
438 /* Set the matching rule and check for "final". */
439 final_rl = rl;
440 if (rl->r_attr & NPF_RULE_FINAL) {
441 break;
442 }
443 }
444 return final_rl;
445 }
446
/*
 * npf_ruleset_inspect: inspection of the main ruleset for filtering.
 * If sub-ruleset is found, inspect it.
 *
 * => If found, ruleset is kept read-locked (caller must release it,
 *    e.g. via npf_rule_apply()).
 * => Caller should protect the nbuf chain.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_ruleset_t *rlset;
	npf_rule_t *rl;
	bool defed;		/* default rule applied already? */

	defed = false;
	npf_core_enter();	/* read-lock held until miss or caller exits */
	rlset = npf_core_ruleset();
reinspect:
	rl = npf_ruleset_match(rlset, npc, nbuf, ifp, di, layer);

	/* If no final rule, then - default (falls back at most once). */
	if (rl == NULL && !defed) {
		npf_ruleset_t *mainrlset = npf_core_ruleset();
		rl = mainrlset->rs_default;
		defed = true;
	}
	/* Inspect the sub-ruleset, if any. */
	if (rl && !TAILQ_EMPTY(&rl->r_subset.rs_queue)) {
		rlset = &rl->r_subset;
		goto reinspect;
	}
	/* Release the lock only on a miss; on a hit the caller releases. */
	if (rl == NULL) {
		npf_core_exit();
	}
	return rl;
}
484
485 /*
486 * npf_rule_apply: apply the rule i.e. run hooks and return appropriate value.
487 *
488 * => Returns ENETUNREACH if "block" and 0 if "pass".
489 * => Releases the ruleset lock.
490 */
491 int
492 npf_rule_apply(npf_cache_t *npc, nbuf_t *nbuf, npf_rule_t *rl, int *retfl)
493 {
494 npf_hook_t *hk;
495 int error;
496
497 KASSERT(npf_core_locked());
498
499 /* Update the "hit" counter. */
500 if (rl->r_attr & NPF_RULE_COUNT) {
501 atomic_inc_ulong(&rl->r_hitcount);
502 }
503
504 /* If not passing - drop the packet. */
505 if ((rl->r_attr & NPF_RULE_PASS) == 0) {
506 error = ENETUNREACH;
507 goto done;
508 }
509 error = 0;
510
511 /* Passing. Run the hooks. */
512 LIST_FOREACH(hk, &rl->r_hooks, hk_entry) {
513 KASSERT(hk->hk_fn != NULL);
514 (*hk->hk_fn)(npc, nbuf, hk->hk_arg);
515 }
516 done:
517 *retfl = rl->r_attr;
518 npf_core_exit();
519 return error;
520 }
521
#if defined(DDB) || defined(_NPF_TESTING)

/*
 * npf_rulenc_dump: print the rule's n-code words and its verdict
 * (debugger/testing aid).
 */
void
npf_rulenc_dump(npf_rule_t *rl)
{
	const uint32_t *op = rl->r_ncode;

	for (size_t left = rl->r_nc_size; left != 0; left -= sizeof(*op)) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif
539