/*	$OpenBSD: pf_table.c,v 1.59 2004/07/08 23:17:38 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
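
/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: requests arriving via
 * ioctl(2) cross the user/kernel boundary with copyin/copyout, while
 * kernel-internal callers hand in kernel memory and use plain bcopy,
 * which cannot fail (hence the ", 0").
 */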

#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
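
/*
 * An entry is a "network" entry when its prefix is shorter than the
 * full width of its address family; e.g. 10.0.0.0/24 is a network for
 * AF_INET (32 bits), while 10.0.0.1/32 is a host entry.
 */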

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
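
/*
 * All tables live in a single red-black tree sorted by name and anchor
 * (see pfr_ktable_compare); pfr_ktable_cnt tracks the node count for
 * pfr_table_count().
 */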

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
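	/*
	 * Example: with N = 65536 entries in the table, log ends up at 18,
	 * so the full-scan path below is taken only when more than
	 * 65536/18 (about 3640) addresses are to be deleted; smaller
	 * batches use individual radix lookups instead.
	 */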
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
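	/*
	 * The remaining checks reject set host bits beyond the prefix:
	 * the first test masks the partial byte at the prefix boundary
	 * (e.g. for a /20, byte 2 is tested against 0x0F), the loop then
	 * requires every following byte of pfra_u to be zero.  This
	 * relies on pfra_u being the first member of struct pfr_addr.
	 */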
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

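/*
 * Look up an address in a table: network addresses use rn_lookup (exact
 * prefix match), host addresses use rn_match (longest match); with
 * 'exact' set, a network entry merely covering the host is not a hit.
 */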
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	 ad;
	int		 i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

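/*
 * Build a netmask sockaddr from a prefix length; e.g. af=AF_INET with
 * net=20 yields 255.255.240.0, and IPv6 masks are filled 32 bits at a
 * time.  net=0 produces the empty mask.
 */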
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	 i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

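/*
 * Merge a shadow table built by pfr_ina_define into the live table.
 * Three cases: the shadow carries no addresses (flag change only), the
 * target is already active (compute add/delete/change work queues so
 * counters of unchanged entries survive), or it is inactive (simply
 * swap the radix heads).
 */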
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

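/*
 * Account a packet to the table and the matching entry.  When the
 * lookup result contradicts what the rule expected (notrule), the
 * traffic is booked under PFR_OP_XPASS instead and no per-entry
 * counters are touched.
 */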
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

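/*
 * Round-robin address selection for pools: *pidx selects the current
 * block and counter holds the last address handed out inside it.
 * Nested (more specific) blocks are skipped by advancing the address
 * past them until rn_match lands back in the main block.
 */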
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is the supplied address within the block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	 w;
	int			 s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}