/*	$NetBSD: npf_ruleset.c,v 1.16 2013/01/20 18:45:56 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.16 2013/01/20 18:45:56 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/kmem.h>
#include <sys/queue.h>

#include <net/pfil.h>
#include <net/if.h>

#include "npf_ncode.h"
#include "npf_impl.h"

/* Ruleset structure (queue and default rule). */
struct npf_ruleset {
	TAILQ_HEAD(, npf_rule)	rs_queue;
	npf_rule_t *		rs_default;
};

#define	NPF_RNAME_LEN		16

/* Rule structure. */
struct npf_rule {
	/* Rule name (optional) and list entry. */
	char			r_name[NPF_RNAME_LEN];
	TAILQ_ENTRY(npf_rule)	r_entry;
	/* Optional: sub-ruleset, NAT policy. */
	npf_ruleset_t		r_subset;
	npf_natpolicy_t *	r_natp;
	/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
	pri_t			r_priority;
	/* N-code to process. */
	void *			r_ncode;
	size_t			r_nc_size;
	/* Attributes of this rule. */
	uint32_t		r_attr;
	/* Interface. */
	u_int			r_ifid;
	/* Rule procedure data. */
	npf_rproc_t *		r_rproc;
};

npf_ruleset_t *
npf_ruleset_create(void)
{
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(sizeof(npf_ruleset_t), KM_SLEEP);
	TAILQ_INIT(&rlset->rs_queue);
	return rlset;
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = TAILQ_FIRST(&rlset->rs_queue)) != NULL) {
		TAILQ_REMOVE(&rlset->rs_queue, rl, r_entry);
		npf_rule_free(rl);
	}
	kmem_free(rlset, sizeof(npf_ruleset_t));
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 *
 * Note: multiple rules at the same priority are allowed.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	npf_rule_t *it;

	if (rl->r_attr & NPF_RULE_DEFAULT) {
		rlset->rs_default = rl;
		return;
	}
	TAILQ_FOREACH(it, &rlset->rs_queue, r_entry) {
		/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
		if (it->r_priority > rl->r_priority)
			break;
	}
	if (it == NULL) {
		TAILQ_INSERT_TAIL(&rlset->rs_queue, rl, r_entry);
	} else {
		TAILQ_INSERT_BEFORE(it, rl, r_entry);
	}
}
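
/*
 * Example (an illustrative sketch, not part of the original sources):
 * typical construction of a ruleset.  The "pass_rl" and "block_rl"
 * rules are hypothetical and assumed to come from npf_rule_alloc();
 * only the priority ordering of this function is shown.
 *
 *	npf_ruleset_t *rlset = npf_ruleset_create();
 *
 *	npf_ruleset_insert(rlset, pass_rl);	(r_priority == 1)
 *	npf_ruleset_insert(rlset, block_rl);	(r_priority == 0)
 *
 * Despite being inserted last, block_rl ends up ahead of pass_rl in
 * rs_queue and is therefore evaluated first.  A rule carrying
 * NPF_RULE_DEFAULT would become rs_default instead of entering the
 * queue.  npf_ruleset_destroy(rlset) would later free all inserted
 * rules.
 */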

/*
 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Find a NAT policy which can share the portmap with the given one. */
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		/*
		 * The NAT policy might not yet be set during the creation
		 * of the ruleset (in such a case, the rule is for our
		 * policy), or the policies might be equal due to the rule
		 * exchange on reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate the specified
 * ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;

	KASSERT(npf_core_locked());

	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		npf_natpolicy_t *np = rl->r_natp;

		if (np != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies by matching
 * the two (active and new) NAT rulesets.
 *
 * => The active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	KASSERT(npf_core_locked());

	/* Scan the new NAT ruleset against the NAT policies in the old one. */
	TAILQ_FOREACH(rl, &nrlset->rs_queue, r_entry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match, exchange the NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update the other NAT policies to share the portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}
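
/*
 * Example (an illustrative sketch, not part of the original sources):
 * how a configuration reload could use this function.  The "newset"
 * and "activeset" variables, as well as the reload sequence itself,
 * are hypothetical; locking is omitted, but the active ruleset must
 * be exclusively locked as noted above.
 *
 *	npf_ruleset_t *newset, *activeset;
 *
 *	newset = npf_ruleset_create();
 *	... populate newset with the new NAT rules ...
 *	npf_ruleset_natreload(newset, activeset);
 *
 * After the call, rules in newset which matched an active NAT policy
 * carry that (established) policy, so existing translations and shared
 * portmaps survive the reload; activeset holds the new policies and can
 * be destroyed together with them once it is swapped out.
 */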

/*
 * npf_rule_alloc: allocate a rule and assign the given n-code to it.
 *
 * => The n-code should be validated by the caller.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict, npf_rproc_t *rp,
    void *nc, size_t nc_size)
{
	npf_rule_t *rl;
	const char *rname;
	int errat __unused;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset.rs_queue);
	rl->r_natp = NULL;

	/* N-code. */
	KASSERT(nc == NULL || npf_ncode_validate(nc, nc_size, &errat) == 0);
	rl->r_ncode = nc;
	rl->r_nc_size = nc_size;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RNAME_LEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
	prop_dictionary_get_uint32(rldict, "interface", &rl->r_ifid);

	/* Rule procedure. */
	if (rp) {
		npf_rproc_acquire(rp);
	}
	rl->r_rproc = rp;

	return rl;
}
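
/*
 * Example (an illustrative sketch, not part of the original sources):
 * a minimal proplib dictionary for this function, with no n-code and
 * no rule procedure.  The "pass-out" rule and the "rlset" variable are
 * hypothetical.
 *
 *	prop_dictionary_t rldict = prop_dictionary_create();
 *	npf_rule_t *rl;
 *
 *	prop_dictionary_set_cstring(rldict, "name", "pass-out");
 *	prop_dictionary_set_uint32(rldict, "attributes",
 *	    NPF_RULE_PASS | NPF_RULE_OUT);
 *	prop_dictionary_set_int32(rldict, "priority", 1);
 *
 *	rl = npf_rule_alloc(rldict, NULL, NULL, 0);
 *	prop_object_release(rldict);
 *	npf_ruleset_insert(rlset, rl);
 */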

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_ncode) {
		/* Free n-code. */
		npf_ncode_free(rl->r_ncode, rl->r_nc_size);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_subset: return sub-ruleset, if any.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

npf_ruleset_t *
npf_rule_subset(npf_rule_t *rl)
{
	return &rl->r_subset;
}

npf_rproc_t *
npf_rule_getrproc(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	KASSERT(npf_core_locked());
	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign the NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

npf_rule_t *
npf_ruleset_replace(const char *name, npf_ruleset_t *rlset)
{
	npf_ruleset_t orlset;
	npf_rule_t *rl;

	npf_core_enter(); /* XXX */
	rlset = npf_core_ruleset();
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		if (rl->r_name[0] == '\0')
			continue;
		if (strncmp(rl->r_name, name, NPF_RNAME_LEN))
			continue;
		memcpy(&orlset, &rl->r_subset, sizeof(npf_ruleset_t));
		break;
	}
	npf_core_exit();
	return rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the n-code processor of each
 * rule against the packet (nbuf chain).  If a matching rule has a
 * sub-ruleset, inspect it as well.
 *
 * => Caller is responsible for the nbuf chain protection.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *mainrlset, const int di, const int layer)
{
	const ifnet_t *ifp = nbuf->nb_ifp;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const npf_ruleset_t *rlset = mainrlset;
	npf_rule_t *final_rl = NULL, *rl;
	bool defed = false;

	KASSERT(ifp != NULL);
	KASSERT(npf_core_locked());
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
again:
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);

		/* Match the interface. */
		if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
			continue;
		}
		/* Match the direction. */
		if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
			if ((rl->r_attr & di_mask) == 0)
				continue;
		}
		/* Process the n-code, if any. */
		const void *nc = rl->r_ncode;
		if (nc && npf_ncode_process(npc, nc, nbuf, layer)) {
			continue;
		}
		/* Set the matching rule and check for "final". */
		final_rl = rl;
		if (rl->r_attr & NPF_RULE_FINAL) {
			break;
		}
	}

	/* If no rule matched, fall back to the default rule (only once). */
	if (final_rl == NULL && !defed) {
		final_rl = mainrlset->rs_default;
		defed = true;
	}
	/* Inspect the sub-ruleset, if any. */
	if (final_rl && !TAILQ_EMPTY(&final_rl->r_subset.rs_queue)) {
		rlset = &final_rl->r_subset;
		final_rl = NULL;
		goto again;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_apply: apply the rule and return appropriate value.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 * => Releases the ruleset lock.
 */
int
npf_rule_apply(npf_cache_t *npc, nbuf_t *nbuf, npf_rule_t *rl, int *retfl)
{
	int error;

	KASSERT(npf_core_locked());

	/* If not passing - drop the packet. */
	error = (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;

	*retfl = rl->r_attr;
	npf_core_exit();

	return error;
}
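
/*
 * Example (an illustrative sketch, not part of the original sources):
 * how a packet handler pairs npf_ruleset_inspect() with npf_rule_apply(),
 * which also drops the core lock.  The "npc", "nbuf", "di" and "layer"
 * variables are assumed to be prepared by the caller, and the handling
 * of the no-match case is hypothetical.
 *
 *	npf_rule_t *rl;
 *	int error, retfl;
 *
 *	npf_core_enter();
 *	rl = npf_ruleset_inspect(npc, nbuf, npf_core_ruleset(), di, layer);
 *	if (rl == NULL) {
 *		npf_core_exit();
 *		error = ENETUNREACH;
 *	} else {
 *		error = npf_rule_apply(npc, nbuf, rl, &retfl);
 *	}
 */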

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(const npf_rule_t *rl)
{
	const uint32_t *op = rl->r_ncode;
	size_t n = rl->r_nc_size;

	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif