/*	$NetBSD: pf_table.c,v 1.4 2004/09/09 14:56:00 yamt Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

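/*
 * Initialize the table and entry pools and the static sockaddr
 * templates used for radix tree lookups.
 */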
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _LKM
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
}
#endif

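/*
 * Flush all addresses from a table.  With PFR_FLAG_DUMMY, only the
 * would-be deletion count is reported.
 */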
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

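/*
 * Add a list of addresses to a table.  A temporary table is used to
 * detect duplicates within the incoming buffer itself; with
 * PFR_FLAG_FEEDBACK a per-address result code is copied back to the
 * caller.
 */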
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

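/*
 * Replace the contents of a table with the given address list: new
 * addresses are inserted, missing ones deleted, and entries whose
 * "not" flag differs are changed in place.
 */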
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

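/*
 * Test which of the given addresses match the table, without
 * modifying it.
 */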
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

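/*
 * Copy the addresses of a table out to userland.  If the supplied
 * buffer is too small, only the required size is returned.
 */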
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

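/*
 * Copy the addresses of a table together with their statistics out
 * to userland.
 */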
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

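/*
 * Check that an address from userland is well-formed: valid family,
 * valid prefix length, no stray bits beyond the network, and no
 * preset feedback code.
 */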
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

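/*
 * Look up an address in a table.  Network entries are found via
 * rn_lookup() with an explicit mask; host entries via rn_match(),
 * optionally rejecting network matches when an exact host is wanted.
 */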
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

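/*
 * Build the sockaddr mask corresponding to a prefix length, for use
 * with the radix tree routines.
 */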
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    htonl(-1 << (32-net));
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

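/*
 * Insert a kernel entry into the table's radix tree for its address
 * family.
 */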
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

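/*
 * Radix tree walker; the requested operation is encoded in the
 * pfr_walktree argument.
 */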
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

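/*
 * Create the given tables, or reactivate them if they already exist
 * but are inactive.  Tables defined inside an anchor also get a root
 * table attached.
 */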
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0, n, nn;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0, xzero = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

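/*
 * Begin a table transaction on a ruleset: clear any leftover inactive
 * tables and hand out a new ticket.
 */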
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

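/*
 * Define a table (and optionally its addresses) inside an open
 * transaction.  The addresses are staged in a shadow table that is
 * swapped in at commit time.
 */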
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

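/*
 * Commit all inactive tables of an open transaction, making their
 * shadow contents the active ones.
 */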
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0, xadd = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

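/*
 * Merge a shadow table into its active counterpart, computing the
 * minimal set of insertions, deletions and flag changes.
 */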
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset	*rs;
	struct pf_anchor	*ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

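/*
 * Apply a new flag set to a table; this may deactivate it, drop its
 * addresses, discard its shadow, or destroy it altogether.
 */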
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

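/*
 * Match an address against a table; called from the packet path.
 */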
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

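/*
 * Update table and entry counters for a filtered packet; called from
 * the packet path.
 */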
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

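/*
 * Find or create the table referenced by a rule and bump its rule
 * reference count.
 */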
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

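/*
 * Pick an address block from a table for round-robin address pools,
 * skipping nested blocks and remembering the position in *pidx and
 * *counter.
 */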
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match(&pfr_sin, kt->pfrkt_ip4) :
		    rn_match(&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}

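/*
 * Refresh a dynamic address's cached address and mask from the table
 * contents.
 */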
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}