/*	$NetBSD: pf_table.c,v 1.2 2004/06/22 14:17:08 itojun Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

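/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: requests arriving via
 * ioctl(2) must cross the user/kernel boundary, while kernel-internal
 * callers operate on kernel memory and fall back to a plain bcopy().
 */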
#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	    copyin((from), (to), (size)) :	\
	    (bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	    copyout((from), (to), (size)) :	\
	    (bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
	int	pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

#ifdef __NetBSD__
POOL_INIT(pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0, "pfrktable",
    NULL);
POOL_INIT(pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0, "pfrkentry",
    NULL);
#endif

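/*
 * One-time setup: OpenBSD creates the pools here (NetBSD does it via
 * POOL_INIT above) and pre-fills the global scratch sockaddrs and the
 * all-ones address used by the lookup and pool-get routines below.
 */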
void
pfr_initialize(void)
{
#ifdef __OpenBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
#endif

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

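/*
 * Flush every address from a table.  The table must exist, be active
 * and not be constant; with PFR_FLAG_DUMMY the deletions are only
 * counted, not performed.
 */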
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

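/*
 * Add an array of addresses to a table.  A scratch table (tmpkt) keeps
 * track of what this request has already inserted so duplicates within
 * the array are caught; with PFR_FLAG_FEEDBACK a per-address result
 * code is copied back out in pfra_fback.
 */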
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

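/*
 * Replace the contents of a table with the given address list: existing
 * entries are marked, entries from the list are added or flagged as
 * changed, and whatever remains unmarked is deleted.  With
 * PFR_FLAG_FEEDBACK and a nonzero *size2 the deleted entries are
 * reported back after the caller's array.
 */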
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

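/*
 * Validate a pfr_addr from userland: known address family, prefix
 * length within bounds, no stray bits beyond the prefix (the casts
 * below rely on the address union being the first member of struct
 * pfr_addr) and clean pfra_not/pfra_fback fields.
 */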
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

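/*
 * Look up a single pfr_addr in the table's radix tree.  Network entries
 * go through rn_lookup() with an explicit mask; host entries use
 * rn_match(), and with 'exact' set a covering network entry is not
 * accepted as a match for a host address.
 */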
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

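/*
 * Build the netmask sockaddr for a given family and prefix length,
 * used both for radix-tree operations and for reporting dynaddr masks.
 */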
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		/* guard the shift: a /0 entry must yield an all-zero mask */
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

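/*
 * Insert a kentry into the table's radix tree: network entries carry a
 * mask, host entries do not.  Returns -1 when the radix code rejects
 * the insertion (typically a duplicate key).
 */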
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

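/*
 * rn_walktree() callback; pfrw_op selects the operation, ranging from
 * mark/sweep collection of entries over copying addresses and stats
 * out to userland, to indexed lookup for pools and dynaddr refresh.
 */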
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0, n, nn;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
	/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0, xzero = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

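/*
 * Stage a table (and, with PFR_FLAG_ADDRSTOO, its addresses) for an
 * open ruleset transaction: the new contents are assembled in a shadow
 * ktable that pfr_ina_commit() later folds into the live table.
 */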
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0, xadd = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

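/*
 * Turn a shadow table live.  Three cases: the shadow carries no
 * address list, so only table stats and flags change; the target is
 * active, so the two address sets are merged entry by entry; or the
 * target holds no addresses and the radix trees can simply be swapped.
 */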
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq,
					 garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq); /* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset	*rs;
	struct pf_anchor	*ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

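/*
 * Apply a new flag word to a table and carry out the side effects:
 * losing the last reference tears the ktable down, clearing ACTIVE
 * flushes its addresses, and clearing INACTIVE discards any pending
 * shadow table.
 */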
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

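/*
 * Fast-path membership test used from the packet filter itself: one
 * rn_match() against the per-family radix tree, honouring negated
 * ("!") entries and bumping the table's match/nomatch counters.
 */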
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

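/*
 * Round-robin address selection for tables used as pools: pick the
 * idx-th non-negated entry, resume from *counter while it still lies
 * within that block, and walk past nested sub-blocks so every address
 * is visited once per cycle.  (The nested-block mask below is prepared
 * with AF_INET even for IPv6 tables, a quirk kept from the upstream
 * code.)
 */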
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match(&pfr_sin, kt->pfrkt_ip4) :
		    rn_match(&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}