1 /* $OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $ */
2
3 /*
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/socket.h>
36 #include <sys/mbuf.h>
37 #include <sys/kernel.h>
38
39 #include <net/if.h>
40 #include <net/route.h>
41 #include <netinet/in.h>
42 #include <netinet/ip_ipsp.h>
43 #include <net/pfvar.h>
44
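/*
 * Helpers shared by the ioctl handlers below.  ACCEPT_FLAGS rejects any
 * flag outside the per-call whitelist; COPYIN/COPYOUT use copyin/copyout
 * for userland ioctls and plain bcopy for in-kernel callers.  Both expect
 * a local "flags" variable in the calling function.
 */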
45 #define ACCEPT_FLAGS(oklist) \
46 do { \
47 if ((flags & ~(oklist)) & \
48 PFR_FLAG_ALLMASK) \
49 return (EINVAL); \
50 } while (0)
51
52 #define COPYIN(from, to, size) \
53 ((flags & PFR_FLAG_USERIOCTL) ? \
54 copyin((from), (to), (size)) : \
55 (bcopy((from), (to), (size)), 0))
56
57 #define COPYOUT(from, to, size) \
58 ((flags & PFR_FLAG_USERIOCTL) ? \
59 copyout((from), (to), (size)) : \
60 (bcopy((from), (to), (size)), 0))
61
62 #define FILLIN_SIN(sin, addr) \
63 do { \
64 (sin).sin_len = sizeof(sin); \
65 (sin).sin_family = AF_INET; \
66 (sin).sin_addr = (addr); \
67 } while (0)
68
69 #define FILLIN_SIN6(sin6, addr) \
70 do { \
71 (sin6).sin6_len = sizeof(sin6); \
72 (sin6).sin6_family = AF_INET6; \
73 (sin6).sin6_addr = (addr); \
74 } while (0)
75
76 #define SWAP(type, a1, a2) \
77 do { \
78 type tmp = a1; \
79 a1 = a2; \
80 a2 = tmp; \
81 } while (0)
82
83 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
84 (struct pf_addr *)&(su)->sin.sin_addr : \
85 (struct pf_addr *)&(su)->sin6.sin6_addr)
86
87 #define AF_BITS(af) (((af)==AF_INET)?32:128)
88 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
89 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
90 #define KENTRY_RNF_ROOT(ke) \
91 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
92
93 #define NO_ADDRESSES (-1)
94 #define ENQUEUE_UNMARKED_ONLY (1)
95 #define INVERT_NEG_FLAG (1)
96
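/*
 * Walker state handed to pfr_walktree() via rn_walktree(): pfrw_op selects
 * the operation, the pfrw_1 union carries its argument, and pfrw_free
 * (alias pfrw_cnt) tracks remaining room or the number of entries visited.
 */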
97 struct pfr_walktree {
98 enum pfrw_op {
99 PFRW_MARK,
100 PFRW_SWEEP,
101 PFRW_ENQUEUE,
102 PFRW_GET_ADDRS,
103 PFRW_GET_ASTATS,
104 PFRW_POOL_GET,
105 PFRW_DYNADDR_UPDATE
106 } pfrw_op;
107 union {
108 struct pfr_addr *pfrw1_addr;
109 struct pfr_astats *pfrw1_astats;
110 struct pfr_kentryworkq *pfrw1_workq;
111 struct pfr_kentry *pfrw1_kentry;
112 struct pfi_dynaddr *pfrw1_dyn;
113 } pfrw_1;
114 int pfrw_free;
115 int pfrw_flags;
116 };
117 #define pfrw_addr pfrw_1.pfrw1_addr
118 #define pfrw_astats pfrw_1.pfrw1_astats
119 #define pfrw_workq pfrw_1.pfrw1_workq
120 #define pfrw_kentry pfrw_1.pfrw1_kentry
121 #define pfrw_dyn pfrw_1.pfrw1_dyn
122 #define pfrw_cnt pfrw_free
123
124 #define senderr(e) do { rv = (e); goto _bad; } while (0)
125
126 struct pool pfr_ktable_pl;
127 struct pool pfr_kentry_pl;
128 struct sockaddr_in pfr_sin;
129 struct sockaddr_in6 pfr_sin6;
130 union sockaddr_union pfr_mask;
131 struct pf_addr pfr_ffaddr;
132
133 void pfr_copyout_addr(struct pfr_addr *,
134 struct pfr_kentry *ke);
135 int pfr_validate_addr(struct pfr_addr *);
136 void pfr_enqueue_addrs(struct pfr_ktable *,
137 struct pfr_kentryworkq *, int *, int);
138 void pfr_mark_addrs(struct pfr_ktable *);
139 struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
140 struct pfr_addr *, int);
141 struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
142 void pfr_destroy_kentries(struct pfr_kentryworkq *);
143 void pfr_destroy_kentry(struct pfr_kentry *);
144 void pfr_insert_kentries(struct pfr_ktable *,
145 struct pfr_kentryworkq *, long);
146 void pfr_remove_kentries(struct pfr_ktable *,
147 struct pfr_kentryworkq *);
148 void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
149 int);
150 void pfr_reset_feedback(struct pfr_addr *, int, int);
151 void pfr_prepare_network(union sockaddr_union *, int, int);
152 int pfr_route_kentry(struct pfr_ktable *,
153 struct pfr_kentry *);
154 int pfr_unroute_kentry(struct pfr_ktable *,
155 struct pfr_kentry *);
156 int pfr_walktree(struct radix_node *, void *);
157 int pfr_validate_table(struct pfr_table *, int, int);
158 void pfr_commit_ktable(struct pfr_ktable *, long);
159 void pfr_insert_ktables(struct pfr_ktableworkq *);
160 void pfr_insert_ktable(struct pfr_ktable *);
161 void pfr_setflags_ktables(struct pfr_ktableworkq *);
162 void pfr_setflags_ktable(struct pfr_ktable *, int);
163 void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
164 int);
165 void pfr_clstats_ktable(struct pfr_ktable *, long, int);
166 struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int);
167 void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
168 void pfr_destroy_ktable(struct pfr_ktable *, int);
169 int pfr_ktable_compare(struct pfr_ktable *,
170 struct pfr_ktable *);
171 struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
172 void pfr_clean_node_mask(struct pfr_ktable *,
173 struct pfr_kentryworkq *);
174 int pfr_table_count(struct pfr_table *, int);
175 int pfr_skip_table(struct pfr_table *,
176 struct pfr_ktable *, int);
177 struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
178
179 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
180 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
181
182 struct pfr_ktablehead pfr_ktables;
183 struct pfr_table pfr_nulltable;
184 int pfr_ktable_cnt;
185
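/*
 * Set up the table and entry pools, the prebuilt IPv4/IPv6 sockaddrs used
 * for radix lookups, and the all-ones address used by pfr_pool_get().
 */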
186 void
187 pfr_initialize(void)
188 {
189 pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
190 "pfrktable", NULL);
191 pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
192 "pfrkentry", NULL);
193
194 pfr_sin.sin_len = sizeof(pfr_sin);
195 pfr_sin.sin_family = AF_INET;
196 pfr_sin6.sin6_len = sizeof(pfr_sin6);
197 pfr_sin6.sin6_family = AF_INET6;
198
199 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
200 }
201
202 int
203 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
204 {
205 struct pfr_ktable *kt;
206 struct pfr_kentryworkq workq;
207 int s;
208
209 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
210 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
211 return (EINVAL);
212 kt = pfr_lookup_table(tbl);
213 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
214 return (ESRCH);
215 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
216 return (EPERM);
217 pfr_enqueue_addrs(kt, &workq, ndel, 0);
218
219 if (!(flags & PFR_FLAG_DUMMY)) {
220 if (flags & PFR_FLAG_ATOMIC)
221 s = splsoftnet();
222 pfr_remove_kentries(kt, &workq);
223 if (flags & PFR_FLAG_ATOMIC)
224 splx(s);
225 if (kt->pfrkt_cnt) {
226 printf("pfr_clr_addrs: corruption detected (%d).\n",
227 kt->pfrkt_cnt);
228 kt->pfrkt_cnt = 0;
229 }
230 }
231 return (0);
232 }
233
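/*
 * DIOCRADDADDRS: add the given addresses to a table.  A scratch table
 * (tmpkt) catches duplicates within the request itself; with
 * PFR_FLAG_FEEDBACK the per-address result code is copied back out.
 */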
234 int
235 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
236 int *nadd, int flags)
237 {
238 struct pfr_ktable *kt, *tmpkt;
239 struct pfr_kentryworkq workq;
240 struct pfr_kentry *p, *q;
241 struct pfr_addr ad;
242 int i, rv, s, xadd = 0;
243 long tzero = time.tv_sec;
244
245 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
246 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
247 return (EINVAL);
248 kt = pfr_lookup_table(tbl);
249 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
250 return (ESRCH);
251 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
252 return (EPERM);
253 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
254 if (tmpkt == NULL)
255 return (ENOMEM);
256 SLIST_INIT(&workq);
257 for (i = 0; i < size; i++) {
258 if (COPYIN(addr+i, &ad, sizeof(ad)))
259 senderr(EFAULT);
260 if (pfr_validate_addr(&ad))
261 senderr(EINVAL);
262 p = pfr_lookup_addr(kt, &ad, 1);
263 q = pfr_lookup_addr(tmpkt, &ad, 1);
264 if (flags & PFR_FLAG_FEEDBACK) {
265 if (q != NULL)
266 ad.pfra_fback = PFR_FB_DUPLICATE;
267 else if (p == NULL)
268 ad.pfra_fback = PFR_FB_ADDED;
269 else if (p->pfrke_not != ad.pfra_not)
270 ad.pfra_fback = PFR_FB_CONFLICT;
271 else
272 ad.pfra_fback = PFR_FB_NONE;
273 }
274 if (p == NULL && q == NULL) {
275 p = pfr_create_kentry(&ad);
276 if (p == NULL)
277 senderr(ENOMEM);
278 if (pfr_route_kentry(tmpkt, p)) {
279 pfr_destroy_kentry(p);
280 ad.pfra_fback = PFR_FB_NONE;
281 } else {
282 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
283 xadd++;
284 }
285 }
286 if (flags & PFR_FLAG_FEEDBACK)
287 if (COPYOUT(&ad, addr+i, sizeof(ad)))
288 senderr(EFAULT);
289 }
290 pfr_clean_node_mask(tmpkt, &workq);
291 if (!(flags & PFR_FLAG_DUMMY)) {
292 if (flags & PFR_FLAG_ATOMIC)
293 s = splsoftnet();
294 pfr_insert_kentries(kt, &workq, tzero);
295 if (flags & PFR_FLAG_ATOMIC)
296 splx(s);
297 } else
298 pfr_destroy_kentries(&workq);
299 if (nadd != NULL)
300 *nadd = xadd;
301 pfr_destroy_ktable(tmpkt, 0);
302 return (0);
303 _bad:
304 pfr_clean_node_mask(tmpkt, &workq);
305 pfr_destroy_kentries(&workq);
306 if (flags & PFR_FLAG_FEEDBACK)
307 pfr_reset_feedback(addr, size, flags);
308 pfr_destroy_ktable(tmpkt, 0);
309 return (rv);
310 }
311
312 int
313 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
314 int *ndel, int flags)
315 {
316 struct pfr_ktable *kt;
317 struct pfr_kentryworkq workq;
318 struct pfr_kentry *p;
319 struct pfr_addr ad;
320 int i, rv, s, xdel = 0;
321
322 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
323 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
324 return (EINVAL);
325 kt = pfr_lookup_table(tbl);
326 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
327 return (ESRCH);
328 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
329 return (EPERM);
330 pfr_mark_addrs(kt);
331 SLIST_INIT(&workq);
332 for (i = 0; i < size; i++) {
333 if (COPYIN(addr+i, &ad, sizeof(ad)))
334 senderr(EFAULT);
335 if (pfr_validate_addr(&ad))
336 senderr(EINVAL);
337 p = pfr_lookup_addr(kt, &ad, 1);
338 if (flags & PFR_FLAG_FEEDBACK) {
339 if (p == NULL)
340 ad.pfra_fback = PFR_FB_NONE;
341 else if (p->pfrke_not != ad.pfra_not)
342 ad.pfra_fback = PFR_FB_CONFLICT;
343 else if (p->pfrke_mark)
344 ad.pfra_fback = PFR_FB_DUPLICATE;
345 else
346 ad.pfra_fback = PFR_FB_DELETED;
347 }
348 if (p != NULL && p->pfrke_not == ad.pfra_not &&
349 !p->pfrke_mark) {
350 p->pfrke_mark = 1;
351 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
352 xdel++;
353 }
354 if (flags & PFR_FLAG_FEEDBACK)
355 if (COPYOUT(&ad, addr+i, sizeof(ad)))
356 senderr(EFAULT);
357 }
358 if (!(flags & PFR_FLAG_DUMMY)) {
359 if (flags & PFR_FLAG_ATOMIC)
360 s = splsoftnet();
361 pfr_remove_kentries(kt, &workq);
362 if (flags & PFR_FLAG_ATOMIC)
363 splx(s);
364 }
365 if (ndel != NULL)
366 *ndel = xdel;
367 return (0);
368 _bad:
369 if (flags & PFR_FLAG_FEEDBACK)
370 pfr_reset_feedback(addr, size, flags);
371 return (rv);
372 }
373
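/*
 * DIOCRSETADDRS: make the table contain exactly the given addresses.
 * New addresses are added, addresses absent from the list are deleted,
 * and entries whose "not" flag differs are flipped and reported as changed.
 */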
374 int
375 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
376 int *size2, int *nadd, int *ndel, int *nchange, int flags)
377 {
378 struct pfr_ktable *kt, *tmpkt;
379 struct pfr_kentryworkq addq, delq, changeq;
380 struct pfr_kentry *p, *q;
381 struct pfr_addr ad;
382 int i, rv, s, xadd = 0, xdel = 0, xchange = 0;
383 long tzero = time.tv_sec;
384
385 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
386 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
387 return (EINVAL);
388 kt = pfr_lookup_table(tbl);
389 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
390 return (ESRCH);
391 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
392 return (EPERM);
393 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
394 if (tmpkt == NULL)
395 return (ENOMEM);
396 pfr_mark_addrs(kt);
397 SLIST_INIT(&addq);
398 SLIST_INIT(&delq);
399 SLIST_INIT(&changeq);
400 for (i = 0; i < size; i++) {
401 if (COPYIN(addr+i, &ad, sizeof(ad)))
402 senderr(EFAULT);
403 if (pfr_validate_addr(&ad))
404 senderr(EINVAL);
405 ad.pfra_fback = PFR_FB_NONE;
406 p = pfr_lookup_addr(kt, &ad, 1);
407 if (p != NULL) {
408 if (p->pfrke_mark) {
409 ad.pfra_fback = PFR_FB_DUPLICATE;
410 goto _skip;
411 }
412 p->pfrke_mark = 1;
413 if (p->pfrke_not != ad.pfra_not) {
414 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
415 ad.pfra_fback = PFR_FB_CHANGED;
416 xchange++;
417 }
418 } else {
419 q = pfr_lookup_addr(tmpkt, &ad, 1);
420 if (q != NULL) {
421 ad.pfra_fback = PFR_FB_DUPLICATE;
422 goto _skip;
423 }
424 p = pfr_create_kentry(&ad);
425 if (p == NULL)
426 senderr(ENOMEM);
427 if (pfr_route_kentry(tmpkt, p)) {
428 pfr_destroy_kentry(p);
429 ad.pfra_fback = PFR_FB_NONE;
430 } else {
431 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
432 ad.pfra_fback = PFR_FB_ADDED;
433 xadd++;
434 }
435 }
436 _skip:
437 if (flags & PFR_FLAG_FEEDBACK)
438 if (COPYOUT(&ad, addr+i, sizeof(ad)))
439 senderr(EFAULT);
440 }
441 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
442 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
443 if (*size2 < size+xdel) {
444 *size2 = size+xdel;
445 senderr(0);
446 }
447 i = 0;
448 SLIST_FOREACH(p, &delq, pfrke_workq) {
449 pfr_copyout_addr(&ad, p);
450 ad.pfra_fback = PFR_FB_DELETED;
451 if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
452 senderr(EFAULT);
453 i++;
454 }
455 }
456 pfr_clean_node_mask(tmpkt, &addq);
457 if (!(flags & PFR_FLAG_DUMMY)) {
458 if (flags & PFR_FLAG_ATOMIC)
459 s = splsoftnet();
460 pfr_insert_kentries(kt, &addq, tzero);
461 pfr_remove_kentries(kt, &delq);
462 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
463 if (flags & PFR_FLAG_ATOMIC)
464 splx(s);
465 } else
466 pfr_destroy_kentries(&addq);
467 if (nadd != NULL)
468 *nadd = xadd;
469 if (ndel != NULL)
470 *ndel = xdel;
471 if (nchange != NULL)
472 *nchange = xchange;
473 if ((flags & PFR_FLAG_FEEDBACK) && size2)
474 *size2 = size+xdel;
475 pfr_destroy_ktable(tmpkt, 0);
476 return (0);
477 _bad:
478 pfr_clean_node_mask(tmpkt, &addq);
479 pfr_destroy_kentries(&addq);
480 if (flags & PFR_FLAG_FEEDBACK)
481 pfr_reset_feedback(addr, size, flags);
482 pfr_destroy_ktable(tmpkt, 0);
483 return (rv);
484 }
485
486 int
487 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
488 int *nmatch, int flags)
489 {
490 struct pfr_ktable *kt;
491 struct pfr_kentry *p;
492 struct pfr_addr ad;
493 int i, xmatch = 0;
494
495 ACCEPT_FLAGS(PFR_FLAG_REPLACE);
496 if (pfr_validate_table(tbl, 0, 0))
497 return (EINVAL);
498 kt = pfr_lookup_table(tbl);
499 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
500 return (ESRCH);
501
502 for (i = 0; i < size; i++) {
503 if (COPYIN(addr+i, &ad, sizeof(ad)))
504 return (EFAULT);
505 if (pfr_validate_addr(&ad))
506 return (EINVAL);
507 if (ADDR_NETWORK(&ad))
508 return (EINVAL);
509 p = pfr_lookup_addr(kt, &ad, 0);
510 if (flags & PFR_FLAG_REPLACE)
511 pfr_copyout_addr(&ad, p);
512 ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
513 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
514 if (p != NULL && !p->pfrke_not)
515 xmatch++;
516 if (COPYOUT(&ad, addr+i, sizeof(ad)))
517 return (EFAULT);
518 }
519 if (nmatch != NULL)
520 *nmatch = xmatch;
521 return (0);
522 }
523
524 int
525 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
526 int flags)
527 {
528 struct pfr_ktable *kt;
529 struct pfr_walktree w;
530 int rv;
531
532 ACCEPT_FLAGS(0);
533 if (pfr_validate_table(tbl, 0, 0))
534 return (EINVAL);
535 kt = pfr_lookup_table(tbl);
536 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
537 return (ESRCH);
538 if (kt->pfrkt_cnt > *size) {
539 *size = kt->pfrkt_cnt;
540 return (0);
541 }
542
543 bzero(&w, sizeof(w));
544 w.pfrw_op = PFRW_GET_ADDRS;
545 w.pfrw_addr = addr;
546 w.pfrw_free = kt->pfrkt_cnt;
547 w.pfrw_flags = flags;
548 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
549 if (!rv)
550 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
551 if (rv)
552 return (rv);
553
554 if (w.pfrw_free) {
555 printf("pfr_get_addrs: corruption detected (%d).\n",
556 w.pfrw_free);
557 return (ENOTTY);
558 }
559 *size = kt->pfrkt_cnt;
560 return (0);
561 }
562
563 int
564 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
565 int flags)
566 {
567 struct pfr_ktable *kt;
568 struct pfr_walktree w;
569 struct pfr_kentryworkq workq;
570 int rv, s;
571 long tzero = time.tv_sec;
572
573 ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
574 if (pfr_validate_table(tbl, 0, 0))
575 return (EINVAL);
576 kt = pfr_lookup_table(tbl);
577 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
578 return (ESRCH);
579 if (kt->pfrkt_cnt > *size) {
580 *size = kt->pfrkt_cnt;
581 return (0);
582 }
583
584 bzero(&w, sizeof(w));
585 w.pfrw_op = PFRW_GET_ASTATS;
586 w.pfrw_astats = addr;
587 w.pfrw_free = kt->pfrkt_cnt;
588 w.pfrw_flags = flags;
589 if (flags & PFR_FLAG_ATOMIC)
590 s = splsoftnet();
591 rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
592 if (!rv)
593 rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
594 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
595 pfr_enqueue_addrs(kt, &workq, NULL, 0);
596 pfr_clstats_kentries(&workq, tzero, 0);
597 }
598 if (flags & PFR_FLAG_ATOMIC)
599 splx(s);
600 if (rv)
601 return (rv);
602
603 if (w.pfrw_free) {
604 printf("pfr_get_astats: corruption detected (%d).\n",
605 w.pfrw_free);
606 return (ENOTTY);
607 }
608 *size = kt->pfrkt_cnt;
609 return (0);
610 }
611
612 int
613 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
614 int *nzero, int flags)
615 {
616 struct pfr_ktable *kt;
617 struct pfr_kentryworkq workq;
618 struct pfr_kentry *p;
619 struct pfr_addr ad;
620 int i, rv, s, xzero = 0;
621
622 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
623 if (pfr_validate_table(tbl, 0, 0))
624 return (EINVAL);
625 kt = pfr_lookup_table(tbl);
626 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
627 return (ESRCH);
628 SLIST_INIT(&workq);
629 for (i = 0; i < size; i++) {
630 if (COPYIN(addr+i, &ad, sizeof(ad)))
631 senderr(EFAULT);
632 if (pfr_validate_addr(&ad))
633 senderr(EINVAL);
634 p = pfr_lookup_addr(kt, &ad, 1);
635 if (flags & PFR_FLAG_FEEDBACK) {
636 ad.pfra_fback = (p != NULL) ?
637 PFR_FB_CLEARED : PFR_FB_NONE;
638 if (COPYOUT(&ad, addr+i, sizeof(ad)))
639 senderr(EFAULT);
640 }
641 if (p != NULL) {
642 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
643 xzero++;
644 }
645 }
646
647 if (!(flags & PFR_FLAG_DUMMY)) {
648 if (flags & PFR_FLAG_ATOMIC)
649 s = splsoftnet();
650 pfr_clstats_kentries(&workq, 0, 0);
651 if (flags & PFR_FLAG_ATOMIC)
652 splx(s);
653 }
654 if (nzero != NULL)
655 *nzero = xzero;
656 return (0);
657 _bad:
658 if (flags & PFR_FLAG_FEEDBACK)
659 pfr_reset_feedback(addr, size, flags);
660 return (rv);
661 }
662
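/*
 * Validate a pfr_addr from userland: known address family, prefix length
 * in range, no host bits set past the prefix, and no stray flag values.
 */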
663 int
664 pfr_validate_addr(struct pfr_addr *ad)
665 {
666 int i;
667
668 switch (ad->pfra_af) {
669 case AF_INET:
670 if (ad->pfra_net > 32)
671 return (-1);
672 break;
673 case AF_INET6:
674 if (ad->pfra_net > 128)
675 return (-1);
676 break;
677 default:
678 return (-1);
679 }
680 if (ad->pfra_net < 128 &&
681 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
682 return (-1);
683 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
684 if (((caddr_t)ad)[i])
685 return (-1);
686 if (ad->pfra_not && ad->pfra_not != 1)
687 return (-1);
688 if (ad->pfra_fback)
689 return (-1);
690 return (0);
691 }
692
693 void
694 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
695 int *naddr, int sweep)
696 {
697 struct pfr_walktree w;
698
699 SLIST_INIT(workq);
700 bzero(&w, sizeof(w));
701 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
702 w.pfrw_workq = workq;
703 if (kt->pfrkt_ip4 != NULL)
704 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
705 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
706 if (kt->pfrkt_ip6 != NULL)
707 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
708 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
709 if (naddr != NULL)
710 *naddr = w.pfrw_cnt;
711 }
712
713 void
714 pfr_mark_addrs(struct pfr_ktable *kt)
715 {
716 struct pfr_walktree w;
717
718 bzero(&w, sizeof(w));
719 w.pfrw_op = PFRW_MARK;
720 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
721 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
722 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
723 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
724 }
725
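/*
 * Find an address in the table's radix trees: rn_lookup() for network
 * entries, rn_match() for single hosts.  With "exact" set, a host lookup
 * that only hits a network entry returns NULL.
 */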
727 struct pfr_kentry *
728 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
729 {
730 union sockaddr_union sa, mask;
731 struct radix_node_head *head;
732 struct pfr_kentry *ke;
733 int s;
734
735 bzero(&sa, sizeof(sa));
736 if (ad->pfra_af == AF_INET) {
737 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
738 head = kt->pfrkt_ip4;
739 } else {
740 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
741 head = kt->pfrkt_ip6;
742 }
743 if (ADDR_NETWORK(ad)) {
744 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
745 s = splsoftnet(); /* rn_lookup makes use of globals */
746 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
747 splx(s);
748 if (ke && KENTRY_RNF_ROOT(ke))
749 ke = NULL;
750 } else {
751 ke = (struct pfr_kentry *)rn_match(&sa, head);
752 if (ke && KENTRY_RNF_ROOT(ke))
753 ke = NULL;
754 if (exact && ke && KENTRY_NETWORK(ke))
755 ke = NULL;
756 }
757 return (ke);
758 }
759
760 struct pfr_kentry *
761 pfr_create_kentry(struct pfr_addr *ad)
762 {
763 struct pfr_kentry *ke;
764
765 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
766 if (ke == NULL)
767 return (NULL);
768 bzero(ke, sizeof(*ke));
769
770 if (ad->pfra_af == AF_INET)
771 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
772 else
773 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
774 ke->pfrke_af = ad->pfra_af;
775 ke->pfrke_net = ad->pfra_net;
776 ke->pfrke_not = ad->pfra_not;
777 return (ke);
778 }
779
780 void
781 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
782 {
783 struct pfr_kentry *p, *q;
784
785 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
786 q = SLIST_NEXT(p, pfrke_workq);
787 pfr_destroy_kentry(p);
788 }
789 }
790
791 void
792 pfr_destroy_kentry(struct pfr_kentry *ke)
793 {
794 pool_put(&pfr_kentry_pl, ke);
795 }
796
797 void
798 pfr_insert_kentries(struct pfr_ktable *kt,
799 struct pfr_kentryworkq *workq, long tzero)
800 {
801 struct pfr_kentry *p;
802 int rv, n = 0;
803
804 SLIST_FOREACH(p, workq, pfrke_workq) {
805 rv = pfr_route_kentry(kt, p);
806 if (rv) {
807 printf("pfr_insert_kentries: cannot route entry "
808 "(code=%d).\n", rv);
809 break;
810 }
811 p->pfrke_tzero = tzero;
812 n++;
813 }
814 kt->pfrkt_cnt += n;
815 }
816
817 void
818 pfr_remove_kentries(struct pfr_ktable *kt,
819 struct pfr_kentryworkq *workq)
820 {
821 struct pfr_kentry *p;
822 int n = 0;
823
824 SLIST_FOREACH(p, workq, pfrke_workq) {
825 pfr_unroute_kentry(kt, p);
826 n++;
827 }
828 kt->pfrkt_cnt -= n;
829 pfr_destroy_kentries(workq);
830 }
831
832 void
833 pfr_clean_node_mask(struct pfr_ktable *kt,
834 struct pfr_kentryworkq *workq)
835 {
836 struct pfr_kentry *p;
837
838 SLIST_FOREACH(p, workq, pfrke_workq)
839 pfr_unroute_kentry(kt, p);
840 }
841
842 void
843 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
844 {
845 struct pfr_kentry *p;
846 int s;
847
848 SLIST_FOREACH(p, workq, pfrke_workq) {
849 s = splsoftnet();
850 if (negchange)
851 p->pfrke_not = !p->pfrke_not;
852 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
853 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
854 splx(s);
855 p->pfrke_tzero = tzero;
856 }
857 }
858
859 void
860 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
861 {
862 struct pfr_addr ad;
863 int i;
864
865 for (i = 0; i < size; i++) {
866 if (COPYIN(addr+i, &ad, sizeof(ad)))
867 break;
868 ad.pfra_fback = PFR_FB_NONE;
869 if (COPYOUT(&ad, addr+i, sizeof(ad)))
870 break;
871 }
872 }
873
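/* Build the sockaddr netmask corresponding to a prefix length. */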
874 void
875 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
876 {
877 int i;
878
879 bzero(sa, sizeof(*sa));
880 if (af == AF_INET) {
881 sa->sin.sin_len = sizeof(sa->sin);
882 sa->sin.sin_family = AF_INET;
883 sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
884 } else {
885 sa->sin6.sin6_len = sizeof(sa->sin6);
886 sa->sin6.sin6_family = AF_INET6;
887 for (i = 0; i < 4; i++) {
888 if (net <= 32) {
889 sa->sin6.sin6_addr.s6_addr32[i] =
890 htonl(-1 << (32-net));
891 break;
892 }
893 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
894 net -= 32;
895 }
896 }
897 }
898
899 int
900 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
901 {
902 union sockaddr_union mask;
903 struct radix_node *rn;
904 struct radix_node_head *head;
905 int s;
906
907 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
908 if (ke->pfrke_af == AF_INET)
909 head = kt->pfrkt_ip4;
910 else
911 head = kt->pfrkt_ip6;
912
913 s = splsoftnet();
914 if (KENTRY_NETWORK(ke)) {
915 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
916 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
917 } else
918 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
919 splx(s);
920
921 return (rn == NULL ? -1 : 0);
922 }
923
924 int
925 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
926 {
927 union sockaddr_union mask;
928 struct radix_node *rn;
929 struct radix_node_head *head;
930 int s;
931
932 if (ke->pfrke_af == AF_INET)
933 head = kt->pfrkt_ip4;
934 else
935 head = kt->pfrkt_ip6;
936
937 s = splsoftnet();
938 if (KENTRY_NETWORK(ke)) {
939 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
940 rn = rn_delete(&ke->pfrke_sa, &mask, head);
941 } else
942 rn = rn_delete(&ke->pfrke_sa, NULL, head);
943 splx(s);
944
945 if (rn == NULL) {
946 printf("pfr_unroute_kentry: delete failed.\n");
947 return (-1);
948 }
949 return (0);
950 }
951
952 void
953 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
954 {
955 bzero(ad, sizeof(*ad));
956 if (ke == NULL)
957 return;
958 ad->pfra_af = ke->pfrke_af;
959 ad->pfra_net = ke->pfrke_net;
960 ad->pfra_not = ke->pfrke_not;
961 if (ad->pfra_af == AF_INET)
962 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
963 else
964 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
965 }
966
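/*
 * rn_walktree() callback: dispatch on pfrw_op to mark, sweep, enqueue,
 * copy out or select entries while traversing a table's radix tree.
 */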
967 int
968 pfr_walktree(struct radix_node *rn, void *arg)
969 {
970 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
971 struct pfr_walktree *w = arg;
972 int s, flags = w->pfrw_flags;
973
974 switch (w->pfrw_op) {
975 case PFRW_MARK:
976 ke->pfrke_mark = 0;
977 break;
978 case PFRW_SWEEP:
979 if (ke->pfrke_mark)
980 break;
981 /* FALLTHROUGH */
982 case PFRW_ENQUEUE:
983 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
984 w->pfrw_cnt++;
985 break;
986 case PFRW_GET_ADDRS:
987 if (w->pfrw_free-- > 0) {
988 struct pfr_addr ad;
989
990 pfr_copyout_addr(&ad, ke);
991 if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
992 return (EFAULT);
993 w->pfrw_addr++;
994 }
995 break;
996 case PFRW_GET_ASTATS:
997 if (w->pfrw_free-- > 0) {
998 struct pfr_astats as;
999
1000 pfr_copyout_addr(&as.pfras_a, ke);
1001
1002 s = splsoftnet();
1003 bcopy(ke->pfrke_packets, as.pfras_packets,
1004 sizeof(as.pfras_packets));
1005 bcopy(ke->pfrke_bytes, as.pfras_bytes,
1006 sizeof(as.pfras_bytes));
1007 splx(s);
1008 as.pfras_tzero = ke->pfrke_tzero;
1009
1010 if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1011 return (EFAULT);
1012 w->pfrw_astats++;
1013 }
1014 break;
1015 case PFRW_POOL_GET:
1016 if (ke->pfrke_not)
1017 break; /* negative entries are ignored */
1018 if (!w->pfrw_cnt--) {
1019 w->pfrw_kentry = ke;
1020 return (1); /* finish search */
1021 }
1022 break;
1023 case PFRW_DYNADDR_UPDATE:
1024 if (ke->pfrke_af == AF_INET) {
1025 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1026 break;
1027 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1028 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1029 &ke->pfrke_sa, AF_INET);
1030 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1031 &pfr_mask, AF_INET);
1032 } else {
1033 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1034 break;
1035 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1036 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1037 &ke->pfrke_sa, AF_INET6);
1038 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1039 &pfr_mask, AF_INET6);
1040 }
1041 break;
1042 }
1043 return (0);
1044 }
1045
1046 int
1047 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1048 {
1049 struct pfr_ktableworkq workq;
1050 struct pfr_ktable *p;
1051 int s, xdel = 0;
1052
1053 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
1054 if (pfr_table_count(filter, flags) < 0)
1055 return (ENOENT);
1056
1057 SLIST_INIT(&workq);
1058 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1059 if (pfr_skip_table(filter, p, flags))
1060 continue;
1061 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1062 continue;
1063 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1064 continue;
1065 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1066 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1067 xdel++;
1068 }
1069 if (!(flags & PFR_FLAG_DUMMY)) {
1070 if (flags & PFR_FLAG_ATOMIC)
1071 s = splsoftnet();
1072 pfr_setflags_ktables(&workq);
1073 if (flags & PFR_FLAG_ATOMIC)
1074 splx(s);
1075 }
1076 if (ndel != NULL)
1077 *ndel = xdel;
1078 return (0);
1079 }
1080
1081 int
1082 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1083 {
1084 struct pfr_ktableworkq addq, changeq;
1085 struct pfr_ktable *p, *q, *r, key;
1086 int i, rv, s, xadd = 0;
1087 long tzero = time.tv_sec;
1088
1089 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1090 SLIST_INIT(&addq);
1091 SLIST_INIT(&changeq);
1092 for (i = 0; i < size; i++) {
1093 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1094 senderr(EFAULT);
1095 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1096 flags & PFR_FLAG_USERIOCTL))
1097 senderr(EINVAL);
1098 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1099 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1100 if (p == NULL) {
1101 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1102 if (p == NULL)
1103 senderr(ENOMEM);
1104 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1105 if (!pfr_ktable_compare(p, q))
1106 goto _skip;
1107 }
1108 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1109 xadd++;
1110 if (!key.pfrkt_anchor[0])
1111 goto _skip;
1112
1113 /* find or create root table */
1114 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1115 bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
1116 r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1117 if (r != NULL) {
1118 p->pfrkt_root = r;
1119 goto _skip;
1120 }
1121 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1122 if (!pfr_ktable_compare(&key, q)) {
1123 p->pfrkt_root = q;
1124 goto _skip;
1125 }
1126 }
1127 key.pfrkt_flags = 0;
1128 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1129 if (r == NULL)
1130 senderr(ENOMEM);
1131 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1132 p->pfrkt_root = r;
1133 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1134 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1135 if (!pfr_ktable_compare(&key, q))
1136 goto _skip;
1137 p->pfrkt_nflags = (p->pfrkt_flags &
1138 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1139 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1140 xadd++;
1141 }
1142 _skip:
1143 ;
1144 }
1145 if (!(flags & PFR_FLAG_DUMMY)) {
1146 if (flags & PFR_FLAG_ATOMIC)
1147 s = splsoftnet();
1148 pfr_insert_ktables(&addq);
1149 pfr_setflags_ktables(&changeq);
1150 if (flags & PFR_FLAG_ATOMIC)
1151 splx(s);
1152 } else
1153 pfr_destroy_ktables(&addq, 0);
1154 if (nadd != NULL)
1155 *nadd = xadd;
1156 return (0);
1157 _bad:
1158 pfr_destroy_ktables(&addq, 0);
1159 return (rv);
1160 }
1161
1162 int
1163 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1164 {
1165 struct pfr_ktableworkq workq;
1166 struct pfr_ktable *p, *q, key;
1167 int i, s, xdel = 0;
1168
1169 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1170 SLIST_INIT(&workq);
1171 for (i = 0; i < size; i++) {
1172 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1173 return (EFAULT);
1174 if (pfr_validate_table(&key.pfrkt_t, 0,
1175 flags & PFR_FLAG_USERIOCTL))
1176 return (EINVAL);
1177 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1178 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1179 SLIST_FOREACH(q, &workq, pfrkt_workq)
1180 if (!pfr_ktable_compare(p, q))
1181 goto _skip;
1182 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1183 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1184 xdel++;
1185 }
1186 _skip:
1187 ;
1188 }
1189
1190 if (!(flags & PFR_FLAG_DUMMY)) {
1191 if (flags & PFR_FLAG_ATOMIC)
1192 s = splsoftnet();
1193 pfr_setflags_ktables(&workq);
1194 if (flags & PFR_FLAG_ATOMIC)
1195 splx(s);
1196 }
1197 if (ndel != NULL)
1198 *ndel = xdel;
1199 return (0);
1200 }
1201
1202 int
1203 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1204 int flags)
1205 {
1206 struct pfr_ktable *p;
1207 int n, nn;
1208
1209 ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
1210 n = nn = pfr_table_count(filter, flags);
1211 if (n < 0)
1212 return (ENOENT);
1213 if (n > *size) {
1214 *size = n;
1215 return (0);
1216 }
1217 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1218 if (pfr_skip_table(filter, p, flags))
1219 continue;
1220 if (n-- <= 0)
1221 continue;
1222 if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
1223 return (EFAULT);
1224 }
1225 if (n) {
1226 printf("pfr_get_tables: corruption detected (%d).\n", n);
1227 return (ENOTTY);
1228 }
1229 *size = nn;
1230 return (0);
1231 }
1232
1233 int
1234 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1235 int flags)
1236 {
1237 struct pfr_ktable *p;
1238 struct pfr_ktableworkq workq;
1239 int s, n, nn;
1240 long tzero = time.tv_sec;
1241
1242 ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
1243 /* XXX PFR_FLAG_CLSTATS disabled */
1244 n = nn = pfr_table_count(filter, flags);
1245 if (n < 0)
1246 return (ENOENT);
1247 if (n > *size) {
1248 *size = n;
1249 return (0);
1250 }
1251 SLIST_INIT(&workq);
1252 if (flags & PFR_FLAG_ATOMIC)
1253 s = splsoftnet();
1254 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1255 if (pfr_skip_table(filter, p, flags))
1256 continue;
1257 if (n-- <= 0)
1258 continue;
1259 if (!(flags & PFR_FLAG_ATOMIC))
1260 s = splsoftnet();
1261 if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
1262 splx(s);
1263 return (EFAULT);
1264 }
1265 if (!(flags & PFR_FLAG_ATOMIC))
1266 splx(s);
1267 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1268 }
1269 if (flags & PFR_FLAG_CLSTATS)
1270 pfr_clstats_ktables(&workq, tzero,
1271 flags & PFR_FLAG_ADDRSTOO);
1272 if (flags & PFR_FLAG_ATOMIC)
1273 splx(s);
1274 if (n) {
1275 printf("pfr_get_tstats: corruption detected (%d).\n", n);
1276 return (ENOTTY);
1277 }
1278 *size = nn;
1279 return (0);
1280 }
1281
1282 int
1283 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1284 {
1285 struct pfr_ktableworkq workq;
1286 struct pfr_ktable *p, key;
1287 int i, s, xzero = 0;
1288 long tzero = time.tv_sec;
1289
1290 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
1291 SLIST_INIT(&workq);
1292 for (i = 0; i < size; i++) {
1293 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1294 return (EFAULT);
1295 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1296 return (EINVAL);
1297 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1298 if (p != NULL) {
1299 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1300 xzero++;
1301 }
1302 }
1303 if (!(flags & PFR_FLAG_DUMMY)) {
1304 if (flags & PFR_FLAG_ATOMIC)
1305 s = splsoftnet();
1306 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1307 if (flags & PFR_FLAG_ATOMIC)
1308 splx(s);
1309 }
1310 if (nzero != NULL)
1311 *nzero = xzero;
1312 return (0);
1313 }
1314
1315 int
1316 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1317 int *nchange, int *ndel, int flags)
1318 {
1319 struct pfr_ktableworkq workq;
1320 struct pfr_ktable *p, *q, key;
1321 int i, s, xchange = 0, xdel = 0;
1322
1323 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1324 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1325 (clrflag & ~PFR_TFLAG_USRMASK) ||
1326 (setflag & clrflag))
1327 return (EINVAL);
1328 SLIST_INIT(&workq);
1329 for (i = 0; i < size; i++) {
1330 if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
1331 return (EFAULT);
1332 if (pfr_validate_table(&key.pfrkt_t, 0,
1333 flags & PFR_FLAG_USERIOCTL))
1334 return (EINVAL);
1335 p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1336 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1337 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1338 ~clrflag;
1339 if (p->pfrkt_nflags == p->pfrkt_flags)
1340 goto _skip;
1341 SLIST_FOREACH(q, &workq, pfrkt_workq)
1342 if (!pfr_ktable_compare(p, q))
1343 goto _skip;
1344 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1345 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1346 (clrflag & PFR_TFLAG_PERSIST) &&
1347 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1348 xdel++;
1349 else
1350 xchange++;
1351 }
1352 _skip:
1353 ;
1354 }
1355 if (!(flags & PFR_FLAG_DUMMY)) {
1356 if (flags & PFR_FLAG_ATOMIC)
1357 s = splsoftnet();
1358 pfr_setflags_ktables(&workq);
1359 if (flags & PFR_FLAG_ATOMIC)
1360 splx(s);
1361 }
1362 if (nchange != NULL)
1363 *nchange = xchange;
1364 if (ndel != NULL)
1365 *ndel = xdel;
1366 return (0);
1367 }
1368
1369 int
1370 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1371 {
1372 struct pfr_ktableworkq workq;
1373 struct pfr_ktable *p;
1374 struct pf_ruleset *rs;
1375 int xdel = 0;
1376
1377 ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1378 rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
1379 if (rs == NULL)
1380 return (ENOMEM);
1381 SLIST_INIT(&workq);
1382 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1383 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1384 pfr_skip_table(trs, p, 0))
1385 continue;
1386 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1387 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1388 xdel++;
1389 }
1390 if (!(flags & PFR_FLAG_DUMMY)) {
1391 pfr_setflags_ktables(&workq);
1392 if (ticket != NULL)
1393 *ticket = ++rs->tticket;
1394 rs->topen = 1;
1395 } else
1396 pf_remove_if_empty_ruleset(rs);
1397 if (ndel != NULL)
1398 *ndel = xdel;
1399 return (0);
1400 }
1401
1402 int
1403 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1404 int *nadd, int *naddr, u_int32_t ticket, int flags)
1405 {
1406 struct pfr_ktableworkq tableq;
1407 struct pfr_kentryworkq addrq;
1408 struct pfr_ktable *kt, *rt, *shadow, key;
1409 struct pfr_kentry *p;
1410 struct pfr_addr ad;
1411 struct pf_ruleset *rs;
1412 int i, rv, xadd = 0, xaddr = 0;
1413
1414 ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
1415 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1416 return (EINVAL);
1417 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1418 flags & PFR_FLAG_USERIOCTL))
1419 return (EINVAL);
1420 rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
1421 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1422 return (EBUSY);
1423 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1424 SLIST_INIT(&tableq);
1425 kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1426 if (kt == NULL) {
1427 kt = pfr_create_ktable(tbl, 0, 1);
1428 if (kt == NULL)
1429 return (ENOMEM);
1430 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1431 xadd++;
1432 if (!tbl->pfrt_anchor[0])
1433 goto _skip;
1434
1435 /* find or create root table */
1436 bzero(&key, sizeof(key));
1437 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1438 rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1439 if (rt != NULL) {
1440 kt->pfrkt_root = rt;
1441 goto _skip;
1442 }
1443 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1444 if (rt == NULL) {
1445 pfr_destroy_ktables(&tableq, 0);
1446 return (ENOMEM);
1447 }
1448 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1449 kt->pfrkt_root = rt;
1450 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1451 xadd++;
1452 _skip:
1453 shadow = pfr_create_ktable(tbl, 0, 0);
1454 if (shadow == NULL) {
1455 pfr_destroy_ktables(&tableq, 0);
1456 return (ENOMEM);
1457 }
1458 SLIST_INIT(&addrq);
1459 for (i = 0; i < size; i++) {
1460 if (COPYIN(addr+i, &ad, sizeof(ad)))
1461 senderr(EFAULT);
1462 if (pfr_validate_addr(&ad))
1463 senderr(EINVAL);
1464 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1465 continue;
1466 p = pfr_create_kentry(&ad);
1467 if (p == NULL)
1468 senderr(ENOMEM);
1469 if (pfr_route_kentry(shadow, p)) {
1470 pfr_destroy_kentry(p);
1471 continue;
1472 }
1473 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1474 xaddr++;
1475 }
1476 if (!(flags & PFR_FLAG_DUMMY)) {
1477 if (kt->pfrkt_shadow != NULL)
1478 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1479 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1480 pfr_insert_ktables(&tableq);
1481 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1482 xaddr : NO_ADDRESSES;
1483 kt->pfrkt_shadow = shadow;
1484 } else {
1485 pfr_clean_node_mask(shadow, &addrq);
1486 pfr_destroy_ktable(shadow, 0);
1487 pfr_destroy_ktables(&tableq, 0);
1488 pfr_destroy_kentries(&addrq);
1489 }
1490 if (nadd != NULL)
1491 *nadd = xadd;
1492 if (naddr != NULL)
1493 *naddr = xaddr;
1494 return (0);
1495 _bad:
1496 pfr_destroy_ktable(shadow, 0);
1497 pfr_destroy_ktables(&tableq, 0);
1498 pfr_destroy_kentries(&addrq);
1499 return (rv);
1500 }
1501
1502 int
1503 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1504 {
1505 struct pfr_ktableworkq workq;
1506 struct pfr_ktable *p;
1507 struct pf_ruleset *rs;
1508 int xdel = 0;
1509
1510 ACCEPT_FLAGS(PFR_FLAG_DUMMY);
1511 rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
1512 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1513 return (0);
1514 SLIST_INIT(&workq);
1515 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1516 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1517 pfr_skip_table(trs, p, 0))
1518 continue;
1519 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1520 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1521 xdel++;
1522 }
1523 if (!(flags & PFR_FLAG_DUMMY)) {
1524 pfr_setflags_ktables(&workq);
1525 rs->topen = 0;
1526 pf_remove_if_empty_ruleset(rs);
1527 }
1528 if (ndel != NULL)
1529 *ndel = xdel;
1530 return (0);
1531 }
1532
1533 int
1534 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1535 int *nchange, int flags)
1536 {
1537 struct pfr_ktable *p;
1538 struct pfr_ktableworkq workq;
1539 struct pf_ruleset *rs;
1540 int s, xadd = 0, xchange = 0;
1541 long tzero = time.tv_sec;
1542
1543 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
1544 rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
1545 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1546 return (EBUSY);
1547
1548 SLIST_INIT(&workq);
1549 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1550 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1551 pfr_skip_table(trs, p, 0))
1552 continue;
1553 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1554 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1555 xchange++;
1556 else
1557 xadd++;
1558 }
1559
1560 if (!(flags & PFR_FLAG_DUMMY)) {
1561 if (flags & PFR_FLAG_ATOMIC)
1562 s = splsoftnet();
1563 SLIST_FOREACH(p, &workq, pfrkt_workq)
1564 pfr_commit_ktable(p, tzero);
1565 if (flags & PFR_FLAG_ATOMIC)
1566 splx(s);
1567 rs->topen = 0;
1568 pf_remove_if_empty_ruleset(rs);
1569 }
1570 if (nadd != NULL)
1571 *nadd = xadd;
1572 if (nchange != NULL)
1573 *nchange = xchange;
1574
1575 return (0);
1576 }
1577
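/*
 * Merge a table's shadow (inactive) copy into the active table: compute
 * add/change/delete sets when the table already holds addresses, or just
 * swap the radix trees when it cannot contain any.
 */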
1578 void
1579 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1580 {
1581 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1582 int nflags;
1583
1584 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1585 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1586 pfr_clstats_ktable(kt, tzero, 1);
1587 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1588 /* kt might contain addresses */
1589 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1590 struct pfr_kentry *p, *q, *next;
1591 struct pfr_addr ad;
1592
1593 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1594 pfr_mark_addrs(kt);
1595 SLIST_INIT(&addq);
1596 SLIST_INIT(&changeq);
1597 SLIST_INIT(&delq);
1598 SLIST_INIT(&garbageq);
1599 pfr_clean_node_mask(shadow, &addrq);
1600 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1601 next = SLIST_NEXT(p, pfrke_workq); /* XXX */
1602 pfr_copyout_addr(&ad, p);
1603 q = pfr_lookup_addr(kt, &ad, 1);
1604 if (q != NULL) {
1605 if (q->pfrke_not != p->pfrke_not)
1606 SLIST_INSERT_HEAD(&changeq, q,
1607 pfrke_workq);
1608 q->pfrke_mark = 1;
1609 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1610 } else {
1611 p->pfrke_tzero = tzero;
1612 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1613 }
1614 }
1615 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1616 pfr_insert_kentries(kt, &addq, tzero);
1617 pfr_remove_kentries(kt, &delq);
1618 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1619 pfr_destroy_kentries(&garbageq);
1620 } else {
1621 /* kt cannot contain addresses */
1622 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1623 shadow->pfrkt_ip4);
1624 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1625 shadow->pfrkt_ip6);
1626 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1627 pfr_clstats_ktable(kt, tzero, 1);
1628 }
1629 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1630 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1631 & ~PFR_TFLAG_INACTIVE;
1632 pfr_destroy_ktable(shadow, 0);
1633 kt->pfrkt_shadow = NULL;
1634 pfr_setflags_ktable(kt, nflags);
1635 }
1636
1637 int
1638 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1639 {
1640 int i;
1641
1642 if (!tbl->pfrt_name[0])
1643 return (-1);
1644 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1645 return (-1);
1646 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1647 return (-1);
1648 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1649 if (tbl->pfrt_name[i])
1650 return (-1);
1651 if (tbl->pfrt_flags & ~allowedflags)
1652 return (-1);
1653 return (0);
1654 }
1655
1656 int
1657 pfr_table_count(struct pfr_table *filter, int flags)
1658 {
1659 struct pf_ruleset *rs;
1660 struct pf_anchor *ac;
1661
1662 if (flags & PFR_FLAG_ALLRSETS)
1663 return (pfr_ktable_cnt);
1664 if (filter->pfrt_ruleset[0]) {
1665 rs = pf_find_ruleset(filter->pfrt_anchor,
1666 filter->pfrt_ruleset);
1667 return ((rs != NULL) ? rs->tables : -1);
1668 }
1669 if (filter->pfrt_anchor[0]) {
1670 ac = pf_find_anchor(filter->pfrt_anchor);
1671 return ((ac != NULL) ? ac->tables : -1);
1672 }
1673 return (pf_main_ruleset.tables);
1674 }
1675
1676 int
1677 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1678 {
1679 if (flags & PFR_FLAG_ALLRSETS)
1680 return (0);
1681 if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
1682 PF_ANCHOR_NAME_SIZE))
1683 return (1);
1684 if (!filter->pfrt_ruleset[0])
1685 return (0);
1686 if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
1687 PF_RULESET_NAME_SIZE))
1688 return (1);
1689 return (0);
1690 }
1691
1692 void
1693 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1694 {
1695 struct pfr_ktable *p;
1696
1697 SLIST_FOREACH(p, workq, pfrkt_workq)
1698 pfr_insert_ktable(p);
1699 }
1700
1701 void
1702 pfr_insert_ktable(struct pfr_ktable *kt)
1703 {
1704 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1705 pfr_ktable_cnt++;
1706 if (kt->pfrkt_root != NULL)
1707 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1708 pfr_setflags_ktable(kt->pfrkt_root,
1709 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1710 }
1711
1712 void
1713 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1714 {
1715 struct pfr_ktable *p;
1716
1717 SLIST_FOREACH(p, workq, pfrkt_workq)
1718 pfr_setflags_ktable(p, p->pfrkt_nflags);
1719 }
1720
1721 void
1722 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1723 {
1724 struct pfr_kentryworkq addrq;
1725
1726 if (!(newf & PFR_TFLAG_REFERENCED) &&
1727 !(newf & PFR_TFLAG_PERSIST))
1728 newf &= ~PFR_TFLAG_ACTIVE;
1729 if (!(newf & PFR_TFLAG_ACTIVE))
1730 newf &= ~PFR_TFLAG_USRMASK;
1731 if (!(newf & PFR_TFLAG_SETMASK)) {
1732 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1733 if (kt->pfrkt_root != NULL)
1734 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1735 pfr_setflags_ktable(kt->pfrkt_root,
1736 kt->pfrkt_root->pfrkt_flags &
1737 ~PFR_TFLAG_REFDANCHOR);
1738 pfr_destroy_ktable(kt, 1);
1739 pfr_ktable_cnt--;
1740 return;
1741 }
1742 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1743 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1744 pfr_remove_kentries(kt, &addrq);
1745 }
1746 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1747 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1748 kt->pfrkt_shadow = NULL;
1749 }
1750 kt->pfrkt_flags = newf;
1751 }
1752
1753 void
1754 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1755 {
1756 struct pfr_ktable *p;
1757
1758 SLIST_FOREACH(p, workq, pfrkt_workq)
1759 pfr_clstats_ktable(p, tzero, recurse);
1760 }
1761
1762 void
1763 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1764 {
1765 struct pfr_kentryworkq addrq;
1766 int s;
1767
1768 if (recurse) {
1769 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1770 pfr_clstats_kentries(&addrq, tzero, 0);
1771 }
1772 s = splsoftnet();
1773 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1774 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1775 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1776 splx(s);
1777 kt->pfrkt_tzero = tzero;
1778 }
1779
1780 struct pfr_ktable *
1781 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1782 {
1783 struct pfr_ktable *kt;
1784 struct pf_ruleset *rs;
1785
1786 kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
1787 if (kt == NULL)
1788 return (NULL);
1789 bzero(kt, sizeof(*kt));
1790 kt->pfrkt_t = *tbl;
1791
1792 if (attachruleset) {
1793 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
1794 tbl->pfrt_ruleset);
1795 if (!rs) {
1796 pfr_destroy_ktable(kt, 0);
1797 return (NULL);
1798 }
1799 kt->pfrkt_rs = rs;
1800 rs->tables++;
1801 if (rs->anchor != NULL)
1802 rs->anchor->tables++;
1803 }
1804
1805 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1806 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1807 !rn_inithead((void **)&kt->pfrkt_ip6,
1808 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1809 pfr_destroy_ktable(kt, 0);
1810 return (NULL);
1811 }
1812 kt->pfrkt_tzero = tzero;
1813
1814 return (kt);
1815 }
1816
1817 void
1818 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1819 {
1820 struct pfr_ktable *p, *q;
1821
1822 for (p = SLIST_FIRST(workq); p; p = q) {
1823 q = SLIST_NEXT(p, pfrkt_workq);
1824 pfr_destroy_ktable(p, flushaddr);
1825 }
1826 }
1827
1828 void
1829 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1830 {
1831 struct pfr_kentryworkq addrq;
1832
1833 if (flushaddr) {
1834 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1835 pfr_clean_node_mask(kt, &addrq);
1836 pfr_destroy_kentries(&addrq);
1837 }
1838 if (kt->pfrkt_ip4 != NULL)
1839 free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1840 if (kt->pfrkt_ip6 != NULL)
1841 free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1842 if (kt->pfrkt_shadow != NULL)
1843 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1844 if (kt->pfrkt_rs != NULL) {
1845 kt->pfrkt_rs->tables--;
1846 if (kt->pfrkt_rs->anchor != NULL)
1847 kt->pfrkt_rs->anchor->tables--;
1848 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1849 }
1850 pool_put(&pfr_ktable_pl, kt);
1851 }
1852
1853 int
1854 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1855 {
1856 int d;
1857
1858 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1859 return (d);
1860 if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
1861 PF_ANCHOR_NAME_SIZE)))
1862 return (d);
1863 return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
1864 PF_RULESET_NAME_SIZE));
1865 }
1866
1867 struct pfr_ktable *
1868 pfr_lookup_table(struct pfr_table *tbl)
1869 {
1870 /* struct pfr_ktable starts like a struct pfr_table */
1871 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1872 (struct pfr_ktable *)tbl));
1873 }
1874
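/*
 * Match a packet address against the table, falling back to the table's
 * root when the table itself is not active; updates match/nomatch counters.
 */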
1875 int
1876 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1877 {
1878 struct pfr_kentry *ke = NULL;
1879 int match;
1880
1881 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1882 kt = kt->pfrkt_root;
1883 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1884 return (0);
1885
1886 switch (af) {
1887 case AF_INET:
1888 pfr_sin.sin_addr.s_addr = a->addr32[0];
1889 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
1890 if (ke && KENTRY_RNF_ROOT(ke))
1891 ke = NULL;
1892 break;
1893 case AF_INET6:
1894 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
1895 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
1896 if (ke && KENTRY_RNF_ROOT(ke))
1897 ke = NULL;
1898 break;
1899 }
1900 match = (ke && !ke->pfrke_not);
1901 if (match)
1902 kt->pfrkt_match++;
1903 else
1904 kt->pfrkt_nomatch++;
1905 return (match);
1906 }
1907
1908 void
1909 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
1910 u_int64_t len, int dir_out, int op_pass, int notrule)
1911 {
1912 struct pfr_kentry *ke = NULL;
1913
1914 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1915 kt = kt->pfrkt_root;
1916 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1917 return;
1918
1919 switch (af) {
1920 case AF_INET:
1921 pfr_sin.sin_addr.s_addr = a->addr32[0];
1922 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
1923 if (ke && KENTRY_RNF_ROOT(ke))
1924 ke = NULL;
1925 break;
1926 case AF_INET6:
1927 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
1928 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
1929 if (ke && KENTRY_RNF_ROOT(ke))
1930 ke = NULL;
1931 break;
1932 }
1933 if ((ke == NULL || ke->pfrke_not) != notrule) {
1934 if (op_pass != PFR_OP_PASS)
1935 printf("pfr_update_stats: assertion failed.\n");
1936 op_pass = PFR_OP_XPASS;
1937 }
1938 kt->pfrkt_packets[dir_out][op_pass]++;
1939 kt->pfrkt_bytes[dir_out][op_pass] += len;
1940 if (ke != NULL && op_pass != PFR_OP_XPASS) {
1941 ke->pfrke_packets[dir_out][op_pass]++;
1942 ke->pfrke_bytes[dir_out][op_pass] += len;
1943 }
1944 }
1945
1946 struct pfr_ktable *
1947 pfr_attach_table(struct pf_ruleset *rs, char *name)
1948 {
1949 struct pfr_ktable *kt, *rt;
1950 struct pfr_table tbl;
1951 struct pf_anchor *ac = rs->anchor;
1952
1953 bzero(&tbl, sizeof(tbl));
1954 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
1955 if (ac != NULL) {
1956 strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
1957 strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
1958 }
1959 kt = pfr_lookup_table(&tbl);
1960 if (kt == NULL) {
1961 kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
1962 if (kt == NULL)
1963 return (NULL);
1964 if (ac != NULL) {
1965 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
1966 bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
1967 rt = pfr_lookup_table(&tbl);
1968 if (rt == NULL) {
1969 rt = pfr_create_ktable(&tbl, 0, 1);
1970 if (rt == NULL) {
1971 pfr_destroy_ktable(kt, 0);
1972 return (NULL);
1973 }
1974 pfr_insert_ktable(rt);
1975 }
1976 kt->pfrkt_root = rt;
1977 }
1978 pfr_insert_ktable(kt);
1979 }
1980 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
1981 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
1982 return (kt);
1983 }
1984
1985 void
1986 pfr_detach_table(struct pfr_ktable *kt)
1987 {
1988 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
1989 printf("pfr_detach_table: refcount = %d.\n",
1990 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
1991 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
1992 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
1993 }
1994
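/*
 * Select an address for pf's address pools: find the idx-th non-negated
 * entry, then advance the counter through its block, skipping any more
 * specific (nested) networks along the way.
 */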
1995 int
1996 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
1997 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
1998 {
1999 struct pfr_kentry *ke, *ke2;
2000 struct pf_addr *addr;
2001 union sockaddr_union mask;
2002 int idx = -1, use_counter = 0;
2003
2004 addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
2005 (struct pf_addr *)&pfr_sin6.sin6_addr;
2006 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2007 kt = kt->pfrkt_root;
2008 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2009 return (-1);
2010
2011 if (pidx != NULL)
2012 idx = *pidx;
2013 if (counter != NULL && idx >= 0)
2014 use_counter = 1;
2015 if (idx < 0)
2016 idx = 0;
2017
2018 _next_block:
2019 ke = pfr_kentry_byidx(kt, idx, af);
2020 if (ke == NULL)
2021 return (1);
2022 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2023 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2024 *rmask = SUNION2PF(&pfr_mask, af);
2025
2026 if (use_counter) {
2027 /* is supplied address within block? */
2028 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2029 /* no, go to next block in table */
2030 idx++;
2031 use_counter = 0;
2032 goto _next_block;
2033 }
2034 PF_ACPY(addr, counter, af);
2035 } else {
2036 /* use first address of block */
2037 PF_ACPY(addr, *raddr, af);
2038 }
2039
2040 if (!KENTRY_NETWORK(ke)) {
2041 /* this is a single IP address - no possible nested block */
2042 PF_ACPY(counter, addr, af);
2043 *pidx = idx;
2044 return (0);
2045 }
2046 for (;;) {
2047 /* we don't want to use a nested block */
2048 ke2 = (struct pfr_kentry *)(af == AF_INET ?
2049 rn_match(&pfr_sin, kt->pfrkt_ip4) :
2050 rn_match(&pfr_sin6, kt->pfrkt_ip6));
2051 /* no need to check KENTRY_RNF_ROOT() here */
2052 if (ke2 == ke) {
2053 /* lookup returned the same block - perfect */
2054 PF_ACPY(counter, addr, af);
2055 *pidx = idx;
2056 return (0);
2057 }
2058
2059 /* we need to increase the counter past the nested block */
2060 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2061 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2062 PF_AINC(addr, af);
2063 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2064 /* ok, we reached the end of our main block */
2065 /* go to next block in table */
2066 idx++;
2067 use_counter = 0;
2068 goto _next_block;
2069 }
2070 }
2071 }
2072
2073 struct pfr_kentry *
2074 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2075 {
2076 struct pfr_walktree w;
2077
2078 bzero(&w, sizeof(w));
2079 w.pfrw_op = PFRW_POOL_GET;
2080 w.pfrw_cnt = idx;
2081
2082 switch (af) {
2083 case AF_INET:
2084 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2085 return (w.pfrw_kentry);
2086 case AF_INET6:
2087 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2088 return (w.pfrw_kentry);
2089 default:
2090 return (NULL);
2091 }
2092 }
2093
2094 void
2095 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2096 {
2097 struct pfr_walktree w;
2098 int s;
2099
2100 bzero(&w, sizeof(w));
2101 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2102 w.pfrw_dyn = dyn;
2103
2104 s = splsoftnet();
2105 dyn->pfid_acnt4 = 0;
2106 dyn->pfid_acnt6 = 0;
2107 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2108 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2109 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2110 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2111 splx(s);
2112 }
2113