/*	$NetBSD: pf_table.c,v 1.1.1.4 2009/12/01 07:03:15 martti Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
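
/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: the same table
 * routines serve both the pfctl ioctl path, where the buffers live in
 * user memory and copyin/copyout may fault, and kernel-internal
 * callers, where a plain bcopy suffices and cannot fail.  A minimal
 * sketch of a kernel-internal call, with a hypothetical buffer kbuf:
 *
 *	struct pfr_addr ad;
 *
 *	if (COPYIN(kbuf, &ad, sizeof(ad), 0))
 *		return (EFAULT);
 *
 * With flags == 0 the EFAULT branch is never taken, since the macro
 * degenerates to (bcopy(kbuf, &ad, sizeof(ad)), 0).
 */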

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
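	/*
	 * Worked example (illustrative): for a table of N = 1024
	 * addresses the loop below leaves log = 12, so the O(N) full
	 * scan is picked once more than 1024/12 = 85 addresses are to
	 * be deleted; smaller batches use per-address O(log N) radix
	 * lookups instead.
	 */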
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
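
/*
 * Example of the host-bit check in pfr_validate_addr (illustrative):
 * for an IPv4 entry 10.0.0.16/28, pfra_net is 28, so byte 3 of the
 * address is tested against 0xFF >> (28 % 8) == 0x0F and every later
 * byte of pfra_u must be zero.  10.0.0.16/28 passes; 10.0.0.17/28 is
 * rejected because a host bit is set beyond the prefix.
 */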

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv) {
		/* do not leak the entry if it cannot be routed */
		pfr_destroy_kentry(p);
		return (rv);
	}

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	 ad;
	int		 i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	 i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
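
/*
 * Example (illustrative): pfr_prepare_network(&sa, AF_INET, 20) sets
 * sin_addr to htonl(-1 << 12) == 0xfffff000, i.e. 255.255.240.0.  For
 * AF_INET6 the same pattern is applied 32 bits at a time, so net = 48
 * produces the mask ffff:ffff:ffff::.
 */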

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

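/*
 * Radix tree walk callback; dispatches on the requested operation.
 * PFRW_MARK and PFRW_SWEEP implement the mark-and-sweep used by the
 * delete and replace paths, PFRW_ENQUEUE collects every entry on a
 * work queue, the two GET operations copy entries out to the caller,
 * PFRW_POOL_GET picks the idx'th non-negated entry for address pools,
 * and PFRW_DYNADDR_UPDATE refreshes dynamic interface addresses.
 */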
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

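/*
 * The pfr_ina_* functions implement the table side of a pfctl commit
 * transaction: pfr_ina_begin() discards stale inactive tables and
 * hands out a ticket, pfr_ina_define() builds shadow tables under that
 * ticket, and pfr_ina_commit() (or pfr_ina_rollback()) atomically
 * swaps the shadows in (or throws them away).  A minimal sketch of a
 * caller, with made-up variable names:
 *
 *	u_int32_t ticket;
 *
 *	pfr_ina_begin(&trs, &ticket, NULL, 0);
 *	pfr_ina_define(&tbl, addrs, naddrs, NULL, NULL, ticket,
 *	    PFR_FLAG_ADDRSTOO);
 *	pfr_ina_commit(&trs, ticket, NULL, NULL, 0);
 */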
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
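
/*
 * Example (illustrative): an anchor of "/foo/bar" is rewritten in
 * place to "foo/bar", with the freed tail zero-filled; any anchor
 * that is not NUL-terminated and NUL-padded out to MAXPATHLEN is
 * rejected.
 */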

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

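/*
 * Fast-path lookup used by the packet filter proper: pf(4) calls this
 * for every rule that references a table, e.g. one like
 * "block in from <spammers>".  A single radix match is performed and
 * the per-table match/nomatch counters are updated; a negated ("!")
 * entry makes a found address count as a non-match.
 */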
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
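
/*
 * pfr_pool_get() above drives round-robin address pools: *pidx selects
 * the current block, counter remembers the last address handed out,
 * and the inner loop steps over nested (more specific) blocks so each
 * address is produced once per sweep.  A minimal sketch of a caller,
 * with made-up variable names:
 *
 *	struct pf_addr *raddr, *rmask;
 *
 *	if (pfr_pool_get(kt, &rpool_idx, &rpool_counter,
 *	    &raddr, &rmask, AF_INET) == 0) {
 *		... use raddr/rmask; rpool_idx and rpool_counter have
 *		been advanced for the next call ...
 *	}
 */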

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	 w;
	int			 s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}