/*	$NetBSD: pf_table.c,v 1.11 2007/03/04 06:02:58 christos Exp $	*/
2 /* $OpenBSD: pf_table.c,v 1.62 2004/12/07 18:02:04 mcbride Exp $ */
3
4 /*
5 * Copyright (c) 2002 Cedric Berger
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34 #ifdef _KERNEL_OPT
35 #include "opt_inet.h"
36 #endif
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43
44 #include <net/if.h>
45 #include <net/route.h>
46 #include <netinet/in.h>
47 #ifdef __OpenBSD__
48 #include <netinet/ip_ipsp.h>
49 #endif
50 #include <net/pfvar.h>
51
/*
 * Helper macros.  ACCEPT_FLAGS, COPYIN and COPYOUT all expect a local
 * "flags" variable (the per-ioctl PFR_FLAG_* word) to be in scope at
 * the point of use.
 */

/* Reject any caller-supplied flag outside the per-operation whitelist. */
#define ACCEPT_FLAGS(oklist)		\
	do {				\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

/*
 * Copy between the caller's buffer and kernel memory: copyin(9)/
 * copyout(9) on the user ioctl path (PFR_FLAG_USERIOCTL), plain bcopy
 * for kernel-internal callers.
 */
#define COPYIN(from, to, size)		\
	((flags & PFR_FLAG_USERIOCTL) ?	\
	copyin((from), (to), (size)) :	\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)		\
	((flags & PFR_FLAG_USERIOCTL) ?	\
	copyout((from), (to), (size)) :	\
	(bcopy((from), (to), (size)), 0))

/* Fill in a sockaddr_in / sockaddr_in6 with the given address. */
#define FILLIN_SIN(sin, addr)		\
	do {				\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)		\
	do {				\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)		\
	do {				\
		type tmp = a1;		\
		a1 = a2;		\
		a2 = tmp;		\
	} while (0)

/* Extract the pf_addr part of a sockaddr_union for the given family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
/* True when the entry describes a prefix, not a single host. */
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True for the radix tree's internal root nodes (not real entries). */
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
103
/*
 * Argument block for pfr_walktree(), the common radix-tree visitor.
 * pfrw_op selects what a walk does; the union carries the per-op
 * cursor or result pointer.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* clear the mark on every entry */
		PFRW_SWEEP,		/* collect entries left unmarked */
		PFRW_ENQUEUE,		/* collect every entry */
		PFRW_GET_ADDRS,		/* copy addresses out to caller */
		PFRW_GET_ASTATS,	/* copy addresses + stats out */
		PFRW_POOL_GET,		/* pick the pfrw_cnt'th entry */
		PFRW_DYNADDR_UPDATE	/* refresh a dynaddr's addr/mask */
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;	/* remaining output room; aliased as pfrw_cnt */
	int	pfrw_flags;	/* PFR_FLAG_* word of the calling operation */
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

/* Record the error code and jump to the function's cleanup label. */
#define senderr(e)	do { rv = (e); goto _bad; } while (0)
132
/* Backing pools and preconstructed sockaddr/address templates. */
struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;	/* used when allocating with intr != 0 */
struct sockaddr_in	 pfr_sin;		/* template, filled by pfr_initialize() */
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;		/* scratch mask for dynaddr updates */
struct pf_addr		 pfr_ffaddr;		/* all-ones address */
140
/* Internal helpers, prototyped here because they are used before definition. */
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

/* The set of all tables, ordered by pfr_ktable_compare(), plus its size. */
struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;		/* all-zero key for scratch tables */
int			 pfr_ktable_cnt;
194
/*
 * One-time initialization of the table subsystem: create the pools for
 * ktables and kentries (pfr_kentry_pl2 uses the default allocator --
 * presumably for allocation contexts where the nointr allocator is
 * unsuitable; confirm against pfr_create_kentry() callers), and
 * precompute the sockaddr templates and the all-ones address.
 */
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
212
#ifdef _LKM
/* Tear down the pools created by pfr_initialize() (LKM unload only). */
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif
222
/*
 * Remove all addresses from table "tbl"; *ndel gets the count removed.
 * With PFR_FLAG_DUMMY the addresses are only counted, nothing changes.
 * PFR_FLAG_ATOMIC brackets the removal with splsoftnet().
 * Returns EINVAL/ESRCH/EPERM on bad table, unknown table or const table.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		/* every entry was enqueued above; a leftover count is a bug */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
254
/*
 * Add the "size" addresses in "addr" to table "tbl"; *nadd gets the
 * number actually (or, with PFR_FLAG_DUMMY, would-be) added.
 *
 * Duplicates within the request itself are detected by also routing
 * each new entry into a scratch table (tmpkt).  With PFR_FLAG_FEEDBACK
 * each pfr_addr is copied back with a PFR_FB_* result code.  On error
 * the _bad path unroutes/destroys everything staged so far and resets
 * the feedback codes.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		/* only stage entries present in neither table nor request */
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	/* take staged entries back out of the scratch table */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
332
/*
 * Delete the "size" addresses in "addr" from table "tbl"; *ndel gets
 * the number removed.  Entries are first marked for deletion via
 * pfrke_mark (0 = candidate, 1 = already queued), then collected and
 * removed.  Flags as in pfr_add_addrs().
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;	/* guard against duplicates */
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
421
422 int
423 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
424 int *size2, int *nadd, int *ndel, int *nchange, int flags)
425 {
426 struct pfr_ktable *kt, *tmpkt;
427 struct pfr_kentryworkq addq, delq, changeq;
428 struct pfr_kentry *p, *q;
429 struct pfr_addr ad;
430 int i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
431 xchange = 0;
432 long tzero = time_second;
433
434 ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
435 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
436 return (EINVAL);
437 kt = pfr_lookup_table(tbl);
438 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
439 return (ESRCH);
440 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
441 return (EPERM);
442 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
443 if (tmpkt == NULL)
444 return (ENOMEM);
445 pfr_mark_addrs(kt);
446 SLIST_INIT(&addq);
447 SLIST_INIT(&delq);
448 SLIST_INIT(&changeq);
449 for (i = 0; i < size; i++) {
450 if (COPYIN(addr+i, &ad, sizeof(ad)))
451 senderr(EFAULT);
452 if (pfr_validate_addr(&ad))
453 senderr(EINVAL);
454 ad.pfra_fback = PFR_FB_NONE;
455 p = pfr_lookup_addr(kt, &ad, 1);
456 if (p != NULL) {
457 if (p->pfrke_mark) {
458 ad.pfra_fback = PFR_FB_DUPLICATE;
459 goto _skip;
460 }
461 p->pfrke_mark = 1;
462 if (p->pfrke_not != ad.pfra_not) {
463 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
464 ad.pfra_fback = PFR_FB_CHANGED;
465 xchange++;
466 }
467 } else {
468 q = pfr_lookup_addr(tmpkt, &ad, 1);
469 if (q != NULL) {
470 ad.pfra_fback = PFR_FB_DUPLICATE;
471 goto _skip;
472 }
473 p = pfr_create_kentry(&ad, 0);
474 if (p == NULL)
475 senderr(ENOMEM);
476 if (pfr_route_kentry(tmpkt, p)) {
477 pfr_destroy_kentry(p);
478 ad.pfra_fback = PFR_FB_NONE;
479 } else {
480 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
481 ad.pfra_fback = PFR_FB_ADDED;
482 xadd++;
483 }
484 }
485 _skip:
486 if (flags & PFR_FLAG_FEEDBACK)
487 if (COPYOUT(&ad, addr+i, sizeof(ad)))
488 senderr(EFAULT);
489 }
490 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
491 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
492 if (*size2 < size+xdel) {
493 *size2 = size+xdel;
494 senderr(0);
495 }
496 i = 0;
497 SLIST_FOREACH(p, &delq, pfrke_workq) {
498 pfr_copyout_addr(&ad, p);
499 ad.pfra_fback = PFR_FB_DELETED;
500 if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
501 senderr(EFAULT);
502 i++;
503 }
504 }
505 pfr_clean_node_mask(tmpkt, &addq);
506 if (!(flags & PFR_FLAG_DUMMY)) {
507 if (flags & PFR_FLAG_ATOMIC)
508 s = splsoftnet();
509 pfr_insert_kentries(kt, &addq, tzero);
510 pfr_remove_kentries(kt, &delq);
511 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
512 if (flags & PFR_FLAG_ATOMIC)
513 splx(s);
514 } else
515 pfr_destroy_kentries(&addq);
516 if (nadd != NULL)
517 *nadd = xadd;
518 if (ndel != NULL)
519 *ndel = xdel;
520 if (nchange != NULL)
521 *nchange = xchange;
522 if ((flags & PFR_FLAG_FEEDBACK) && size2)
523 *size2 = size+xdel;
524 pfr_destroy_ktable(tmpkt, 0);
525 return (0);
526 _bad:
527 pfr_clean_node_mask(tmpkt, &addq);
528 pfr_destroy_kentries(&addq);
529 if (flags & PFR_FLAG_FEEDBACK)
530 pfr_reset_feedback(addr, size, flags);
531 pfr_destroy_ktable(tmpkt, 0);
532 return (rv);
533 }
534
/*
 * Test which of the "size" addresses in "addr" match table "tbl";
 * *nmatch gets the number of (non-negated) matches.  Feedback codes
 * are always copied back (no PFR_FLAG_FEEDBACK needed); with
 * PFR_FLAG_REPLACE the matching entry's address is also written back
 * over the input.  Prefix entries are not accepted as input here.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);	/* zeroes ad if no match */
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
572
/*
 * Copy all addresses of table "tbl" out to "addr".  *size is in/out:
 * if the table holds more entries than *size, only the required size
 * is reported (return 0, nothing copied); otherwise the entries are
 * copied and *size set to the count.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	/* the walk must have consumed exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
611
/*
 * Like pfr_get_addrs(), but copies out pfr_astats (address + counters).
 * PFR_FLAG_CLSTATS would additionally zero the counters afterwards,
 * but it is currently not accepted by ACCEPT_FLAGS (see XXX below),
 * so that branch is dead unless the flag is re-enabled.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	/* the walk must have consumed exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
660
/*
 * Zero the statistics of the "size" addresses in "addr" within table
 * "tbl"; *nzero gets the number of entries cleared.  Flags as in
 * pfr_add_addrs().
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
711
712 int
713 pfr_validate_addr(struct pfr_addr *ad)
714 {
715 int i;
716
717 switch (ad->pfra_af) {
718 #ifdef INET
719 case AF_INET:
720 if (ad->pfra_net > 32)
721 return (-1);
722 break;
723 #endif /* INET */
724 #ifdef INET6
725 case AF_INET6:
726 if (ad->pfra_net > 128)
727 return (-1);
728 break;
729 #endif /* INET6 */
730 default:
731 return (-1);
732 }
733 if (ad->pfra_net < 128 &&
734 (((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
735 return (-1);
736 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
737 if (((char *)ad)[i])
738 return (-1);
739 if (ad->pfra_not && ad->pfra_not != 1)
740 return (-1);
741 if (ad->pfra_fback)
742 return (-1);
743 return (0);
744 }
745
/*
 * Collect table entries onto *workq: all of them (sweep == 0,
 * PFRW_ENQUEUE) or only those left unmarked by a preceding
 * pfr_mark_addrs() pass (sweep != 0, PFRW_SWEEP).  *naddr, if
 * non-NULL, gets the number collected.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
765
766 void
767 pfr_mark_addrs(struct pfr_ktable *kt)
768 {
769 struct pfr_walktree w;
770
771 bzero(&w, sizeof(w));
772 w.pfrw_op = PFRW_MARK;
773 if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
774 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
775 if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
776 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
777 }
778
779
/*
 * Find the kentry matching "ad" in table "kt".  Prefix entries are
 * looked up with rn_lookup() on (addr, mask); host entries with
 * rn_match().  With "exact" set, a host lookup that lands on a prefix
 * entry is rejected.  Returns NULL when nothing matches (radix root
 * nodes are filtered out).
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	/* poison value; overwritten below -- af was validated by callers */
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if ( ad->pfra_af == AF_INET6 ) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
812
813 struct pfr_kentry *
814 pfr_create_kentry(struct pfr_addr *ad, int intr)
815 {
816 struct pfr_kentry *ke;
817
818 if (intr)
819 ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
820 else
821 ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
822 if (ke == NULL)
823 return (NULL);
824 bzero(ke, sizeof(*ke));
825
826 if (ad->pfra_af == AF_INET)
827 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
828 else if (ad->pfra_af == AF_INET6)
829 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
830 ke->pfrke_af = ad->pfra_af;
831 ke->pfrke_net = ad->pfra_net;
832 ke->pfrke_not = ad->pfra_not;
833 ke->pfrke_intrpool = intr;
834 return (ke);
835 }
836
837 void
838 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
839 {
840 struct pfr_kentry *p, *q;
841
842 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
843 q = SLIST_NEXT(p, pfrke_workq);
844 pfr_destroy_kentry(p);
845 }
846 }
847
848 void
849 pfr_destroy_kentry(struct pfr_kentry *ke)
850 {
851 if (ke->pfrke_intrpool)
852 pool_put(&pfr_kentry_pl2, ke);
853 else
854 pool_put(&pfr_kentry_pl, ke);
855 }
856
857 void
858 pfr_insert_kentries(struct pfr_ktable *kt,
859 struct pfr_kentryworkq *workq, long tzero)
860 {
861 struct pfr_kentry *p;
862 int rv, n = 0;
863
864 SLIST_FOREACH(p, workq, pfrke_workq) {
865 rv = pfr_route_kentry(kt, p);
866 if (rv) {
867 printf("pfr_insert_kentries: cannot route entry "
868 "(code=%d).\n", rv);
869 break;
870 }
871 p->pfrke_tzero = tzero;
872 n++;
873 }
874 kt->pfrkt_cnt += n;
875 }
876
877 int
878 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
879 {
880 struct pfr_kentry *p;
881 int rv;
882
883 p = pfr_lookup_addr(kt, ad, 1);
884 if (p != NULL)
885 return (0);
886 p = pfr_create_kentry(ad, 1);
887 if (p == NULL)
888 return (EINVAL);
889
890 rv = pfr_route_kentry(kt, p);
891 if (rv)
892 return (rv);
893
894 p->pfrke_tzero = tzero;
895 kt->pfrkt_cnt++;
896
897 return (0);
898 }
899
900 void
901 pfr_remove_kentries(struct pfr_ktable *kt,
902 struct pfr_kentryworkq *workq)
903 {
904 struct pfr_kentry *p;
905 int n = 0;
906
907 SLIST_FOREACH(p, workq, pfrke_workq) {
908 pfr_unroute_kentry(kt, p);
909 n++;
910 }
911 kt->pfrkt_cnt -= n;
912 pfr_destroy_kentries(workq);
913 }
914
915 void
916 pfr_clean_node_mask(struct pfr_ktable *kt,
917 struct pfr_kentryworkq *workq)
918 {
919 struct pfr_kentry *p;
920
921 SLIST_FOREACH(p, workq, pfrke_workq)
922 pfr_unroute_kentry(kt, p);
923 }
924
/*
 * Zero the packet/byte counters of every entry on "workq" and stamp
 * them with "tzero".  With "negchange" the entry's negation flag is
 * toggled as well (pfr_set_addrs() uses this for changed entries).
 * The counter/flag update runs at splsoftnet() because the entries
 * are live in a table the forwarding path may be using.
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}
941
942 void
943 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
944 {
945 struct pfr_addr ad;
946 int i;
947
948 for (i = 0; i < size; i++) {
949 if (COPYIN(addr+i, &ad, sizeof(ad)))
950 break;
951 ad.pfra_fback = PFR_FB_NONE;
952 if (COPYOUT(&ad, addr+i, sizeof(ad)))
953 break;
954 }
955 }
956
957 void
958 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
959 {
960 int i;
961
962 bzero(sa, sizeof(*sa));
963 if (af == AF_INET) {
964 sa->sin.sin_len = sizeof(sa->sin);
965 sa->sin.sin_family = AF_INET;
966 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
967 } else if (af == AF_INET6) {
968 sa->sin6.sin6_len = sizeof(sa->sin6);
969 sa->sin6.sin6_family = AF_INET6;
970 for (i = 0; i < 4; i++) {
971 if (net <= 32) {
972 sa->sin6.sin6_addr.s6_addr32[i] =
973 net ? htonl(-1 << (32-net)) : 0;
974 break;
975 }
976 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
977 net -= 32;
978 }
979 }
980 }
981
/*
 * Insert "ke" into kt's v4 or v6 radix tree according to pfrke_af,
 * with a prefix mask for network entries.  Runs at splsoftnet() since
 * the tree is shared with the forwarding path.  Returns 0 on success,
 * -1 if rn_addroute() refused the node (e.g. duplicate key).
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	/* poison value; overwritten below -- af was validated earlier */
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}
1006
/*
 * Remove "ke" from kt's radix tree (counterpart of pfr_route_kentry).
 * Returns 0 on success, -1 (with a console warning) if rn_delete()
 * could not find the node.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	/* poison value; overwritten below -- af was validated earlier */
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1034
1035 void
1036 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1037 {
1038 bzero(ad, sizeof(*ad));
1039 if (ke == NULL)
1040 return;
1041 ad->pfra_af = ke->pfrke_af;
1042 ad->pfra_net = ke->pfrke_net;
1043 ad->pfra_not = ke->pfrke_not;
1044 if (ad->pfra_af == AF_INET)
1045 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1046 else if (ad->pfra_af == AF_INET6)
1047 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1048 }
1049
1050 int
1051 pfr_walktree(struct radix_node *rn, void *arg)
1052 {
1053 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1054 struct pfr_walktree *w = arg;
1055 int s, flags = w->pfrw_flags;
1056
1057 switch (w->pfrw_op) {
1058 case PFRW_MARK:
1059 ke->pfrke_mark = 0;
1060 break;
1061 case PFRW_SWEEP:
1062 if (ke->pfrke_mark)
1063 break;
1064 /* FALLTHROUGH */
1065 case PFRW_ENQUEUE:
1066 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1067 w->pfrw_cnt++;
1068 break;
1069 case PFRW_GET_ADDRS:
1070 if (w->pfrw_free-- > 0) {
1071 struct pfr_addr ad;
1072
1073 pfr_copyout_addr(&ad, ke);
1074 if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1075 return (EFAULT);
1076 w->pfrw_addr++;
1077 }
1078 break;
1079 case PFRW_GET_ASTATS:
1080 if (w->pfrw_free-- > 0) {
1081 struct pfr_astats as;
1082
1083 pfr_copyout_addr(&as.pfras_a, ke);
1084
1085 s = splsoftnet();
1086 bcopy(ke->pfrke_packets, as.pfras_packets,
1087 sizeof(as.pfras_packets));
1088 bcopy(ke->pfrke_bytes, as.pfras_bytes,
1089 sizeof(as.pfras_bytes));
1090 splx(s);
1091 as.pfras_tzero = ke->pfrke_tzero;
1092
1093 if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
1094 return (EFAULT);
1095 w->pfrw_astats++;
1096 }
1097 break;
1098 case PFRW_POOL_GET:
1099 if (ke->pfrke_not)
1100 break; /* negative entries are ignored */
1101 if (!w->pfrw_cnt--) {
1102 w->pfrw_kentry = ke;
1103 return (1); /* finish search */
1104 }
1105 break;
1106 case PFRW_DYNADDR_UPDATE:
1107 if (ke->pfrke_af == AF_INET) {
1108 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1109 break;
1110 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1111 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1112 &ke->pfrke_sa, AF_INET);
1113 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1114 &pfr_mask, AF_INET);
1115 } else if (ke->pfrke_af == AF_INET6){
1116 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1117 break;
1118 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1119 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1120 &ke->pfrke_sa, AF_INET6);
1121 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1122 &pfr_mask, AF_INET6);
1123 }
1124 break;
1125 }
1126 return (0);
1127 }
1128
/*
 * Deactivate every active table matching "filter" (reserved-anchor
 * tables are skipped); *ndel gets the count.  The ACTIVE flag is
 * cleared via pfr_setflags_ktables() on the collected workq.  With
 * PFR_FLAG_DUMMY only the count is computed.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1165
/*
 * Create the tables described by the userland array tbl[0..size-1].
 * New tables go on addq; tables that already exist but are inactive
 * get reactivated via changeq.  A table inside an anchor also gets a
 * root table (same name, empty anchor), created here if necessary.
 * *nadd receives the number of tables added/reactivated.
 * senderr() jumps to _bad, which undoes the pending allocations.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* brand new table */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates within this same request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* maybe the root is already pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive table: reactivate it */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1245
/*
 * Delete (deactivate) the tables named in the userland array
 * tbl[0..size-1].  Like pfr_clr_tables(), deletion is done by
 * clearing the ACTIVE flag through pfr_setflags_ktables().
 * *ndel receives the number of tables deleted.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1285
/*
 * Copy the tables matching the filter out to userland.
 * Size negotiation: if the caller's buffer (*size entries) is too
 * small, *size is set to the required count and 0 is returned with
 * nothing copied.  On success *size is set to the number of entries
 * written.  A non-zero residue in n after the walk means the table
 * set changed underneath us (corruption); report ENOTTY.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1318
/*
 * Copy per-table statistics out to userland, with the same size
 * negotiation as pfr_get_tables().  With PFR_FLAG_ATOMIC the whole
 * walk runs at splsoftnet; otherwise the spl is raised only around
 * each individual COPYOUT so stats stay self-consistent per entry.
 * NOTE(review): with !ATOMIC the splx(s) in the COPYOUT-failure path
 * releases the per-entry spl taken just above it — correct in both
 * modes.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	/* currently unreachable: CLSTATS is rejected by ACCEPT_FLAGS above */
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1369
/*
 * Zero the statistics of the tables named in tbl[0..size-1].
 * With PFR_FLAG_ADDRSTOO the per-address stats are cleared as well.
 * *nzero receives the number of tables whose stats were cleared.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1402
/*
 * Set and/or clear user flags on the tables named in tbl[0..size-1].
 * setflag/clrflag must be disjoint subsets of PFR_TFLAG_USRMASK.
 * Clearing PERSIST on an otherwise unreferenced table deletes it,
 * which is why such tables count toward *ndel instead of *nchange.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no-op changes and in-request duplicates skipped */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST from an unreferenced table
			 * will destroy it in pfr_setflags_ktable() */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1456
/*
 * Begin a table transaction for the given anchor: discard any
 * leftover INACTIVE tables from a previous, uncommitted transaction,
 * bump the ruleset ticket and mark the ruleset open.  *ndel receives
 * the number of stale inactive tables that were flushed.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		/* dummy run: drop the ruleset if we just created it */
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1489
/*
 * Define a table (and optionally its addresses) inside an open
 * transaction identified by ticket.  The addresses are loaded into a
 * detached "shadow" ktable which replaces the live contents at commit
 * time (pfr_ina_commit/pfr_commit_ktable).  *nadd receives the number
 * of tables defined, *naddr the number of addresses loaded.
 * senderr() jumps to _bad, which releases everything built so far.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	/* only accept definitions for the currently open transaction */
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* build the shadow table holding the transaction's addresses */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* duplicates within the request are silently ignored */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any previously defined shadow for this table */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1589
/*
 * Abort an open table transaction: drop the INACTIVE flag (and thereby
 * the shadow tables) of every table in the anchor, and close the
 * ruleset.  A stale ticket is not an error; it simply means there is
 * nothing to roll back.  *ndel receives the number of tables dropped.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1620
/*
 * Commit an open table transaction: every INACTIVE table in the
 * anchor has its shadow contents promoted to live via
 * pfr_commit_ktable().  *nadd counts newly activated tables,
 * *nchange tables that were already active and got replaced.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		/* fetch the successor first: commit may unlink p */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1667
/*
 * Promote a table's shadow (built by pfr_ina_define()) to live state.
 * Three cases:
 *  - shadow holds no address list (NO_ADDRESSES): only flags change;
 *  - table is already active: merge shadow addresses into the live
 *    radix trees, computing add/delete/change/garbage sets so that
 *    statistics of unchanged entries survive the commit;
 *  - table is inactive: simply swap the (empty) live radix heads with
 *    the shadow's populated ones.
 * Finally the shadow is destroyed and the new flag word applied.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* address already live: keep q, toss p;
				 * flip its negate flag if it changed */
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* anything still unmarked in kt was not in the shadow */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1726
1727 int
1728 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1729 {
1730 int i;
1731
1732 if (!tbl->pfrt_name[0])
1733 return (-1);
1734 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1735 return (-1);
1736 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1737 return (-1);
1738 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1739 if (tbl->pfrt_name[i])
1740 return (-1);
1741 if (pfr_fix_anchor(tbl->pfrt_anchor))
1742 return (-1);
1743 if (tbl->pfrt_flags & ~allowedflags)
1744 return (-1);
1745 return (0);
1746 }
1747
1748 /*
1749 * Rewrite anchors referenced by tables to remove slashes
1750 * and check for validity.
1751 */
1752 int
1753 pfr_fix_anchor(char *anchor)
1754 {
1755 size_t siz = MAXPATHLEN;
1756 int i;
1757
1758 if (anchor[0] == '/') {
1759 char *path;
1760 int off;
1761
1762 path = anchor;
1763 off = 1;
1764 while (*++path == '/')
1765 off++;
1766 bcopy(path, anchor, siz - off);
1767 memset(anchor + siz - off, 0, off);
1768 }
1769 if (anchor[siz - 1])
1770 return (-1);
1771 for (i = strlen(anchor); i < siz; i++)
1772 if (anchor[i])
1773 return (-1);
1774 return (0);
1775 }
1776
1777 int
1778 pfr_table_count(struct pfr_table *filter, int flags)
1779 {
1780 struct pf_ruleset *rs;
1781
1782 if (flags & PFR_FLAG_ALLRSETS)
1783 return (pfr_ktable_cnt);
1784 if (filter->pfrt_anchor[0]) {
1785 rs = pf_find_ruleset(filter->pfrt_anchor);
1786 return ((rs != NULL) ? rs->tables : -1);
1787 }
1788 return (pf_main_ruleset.tables);
1789 }
1790
1791 int
1792 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1793 {
1794 if (flags & PFR_FLAG_ALLRSETS)
1795 return (0);
1796 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1797 return (1);
1798 return (0);
1799 }
1800
1801 void
1802 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1803 {
1804 struct pfr_ktable *p;
1805
1806 SLIST_FOREACH(p, workq, pfrkt_workq)
1807 pfr_insert_ktable(p);
1808 }
1809
1810 void
1811 pfr_insert_ktable(struct pfr_ktable *kt)
1812 {
1813 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1814 pfr_ktable_cnt++;
1815 if (kt->pfrkt_root != NULL)
1816 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1817 pfr_setflags_ktable(kt->pfrkt_root,
1818 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1819 }
1820
1821 void
1822 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1823 {
1824 struct pfr_ktable *p, *q;
1825
1826 for (p = SLIST_FIRST(workq); p; p = q) {
1827 q = SLIST_NEXT(p, pfrkt_workq);
1828 pfr_setflags_ktable(p, p->pfrkt_nflags);
1829 }
1830 }
1831
/*
 * Apply a new flag word to a table, performing the side effects the
 * transition implies:
 *  - without REFERENCED or PERSIST the table cannot stay ACTIVE;
 *  - without any "set" flag left, the table is removed from the tree
 *    and destroyed (kt is freed — callers must not touch it after);
 *  - deactivation flushes all addresses;
 *  - clearing INACTIVE discards a pending shadow table.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* no set flags remain: unlink and destroy the table */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		/* dropping the last anchor ref clears REFDANCHOR on root */
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1863
1864 void
1865 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1866 {
1867 struct pfr_ktable *p;
1868
1869 SLIST_FOREACH(p, workq, pfrkt_workq)
1870 pfr_clstats_ktable(p, tzero, recurse);
1871 }
1872
/*
 * Reset a table's packet/byte/match counters and set its creation
 * time to tzero.  With recurse, all per-address counters are reset
 * too.  The counter reset itself runs at splsoftnet so the packet
 * path never sees a half-cleared set.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
1890
/*
 * Allocate and initialize a kernel table: zeroed stats, a copy of the
 * userland table spec, and fresh IPv4/IPv6 radix heads.  With
 * attachruleset the table is tied to (and counted against) its
 * anchor's ruleset.  Returns NULL on allocation failure, undoing any
 * partial setup via pfr_destroy_ktable().
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	void *h4 = NULL, *h6 = NULL;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix head per address family */
	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
		goto out;

	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		/* v6 head failed: release the v4 head before bailing */
		Free(h4);
		goto out;
	}
	kt->pfrkt_ip4 = h4;
	kt->pfrkt_ip6 = h6;
	kt->pfrkt_tzero = tzero;

	return (kt);
out:
	/* also drops the ruleset reference taken above, if any */
	pfr_destroy_ktable(kt, 0);
	return (NULL);
}
1930
1931 void
1932 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1933 {
1934 struct pfr_ktable *p, *q;
1935
1936 for (p = SLIST_FIRST(workq); p; p = q) {
1937 q = SLIST_NEXT(p, pfrkt_workq);
1938 pfr_destroy_ktable(p, flushaddr);
1939 }
1940 }
1941
/*
 * Release a table and everything it owns: optionally its addresses
 * (flushaddr), its radix heads, a pending shadow table, and its
 * ruleset reference.  The ktable itself goes back to the pool.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((void *)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((void *)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
1964
1965 int
1966 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1967 {
1968 int d;
1969
1970 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1971 return (d);
1972 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1973 }
1974
1975 struct pfr_ktable *
1976 pfr_lookup_table(struct pfr_table *tbl)
1977 {
1978 /* struct pfr_ktable start like a struct pfr_table */
1979 return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1980 (struct pfr_ktable *)tbl));
1981 }
1982
/*
 * Test whether address a (of family af) matches the table: returns 1
 * on a positive match, 0 otherwise, and updates the table's
 * match/nomatch counters.  Inactive tables defer to their root table.
 * Uses the global scratch sockaddrs pfr_sin/pfr_sin6 as lookup keys.
 * NOTE(review): presumably only called from the packet path at
 * splsoftnet, which is what makes the shared scratch buffers safe —
 * confirm against callers.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		/* the radix root nodes are not real entries */
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	/* a negated entry means "explicitly not in the table" */
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
2019
/*
 * Account one packet of len bytes against the table (and the matching
 * entry, if any).  dir_out/op_pass index the per-direction,
 * per-operation counter arrays.  If the lookup result contradicts
 * what the rule matched earlier (notrule), the packet is booked under
 * PFR_OP_XPASS instead and not charged to any entry.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	/* table contents changed since the rule matched */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2063
/*
 * Find or create the table named by a rule in the given ruleset and
 * take a rule reference on it.  A table inside an anchor also gets a
 * root table (same name, empty anchor), created on demand.  The first
 * rule reference marks the table REFERENCED, which keeps it alive.
 * Returns NULL on allocation failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* anchored table: make sure a root table exists */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2099
2100 void
2101 pfr_detach_table(struct pfr_ktable *kt)
2102 {
2103 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2104 printf("pfr_detach_table: refcount = %d.\n",
2105 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2106 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2107 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2108 }
2109
2110 int
2111 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2112 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2113 {
2114 struct pfr_kentry *ke, *ke2 = (void *)0xdeadb;
2115 struct pf_addr *addr = (void *)0xdeadb;
2116 union sockaddr_union mask;
2117 int idx = -1, use_counter = 0;
2118
2119 if (af == AF_INET)
2120 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2121 else if (af == AF_INET6)
2122 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2123 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2124 kt = kt->pfrkt_root;
2125 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2126 return (-1);
2127
2128 if (pidx != NULL)
2129 idx = *pidx;
2130 if (counter != NULL && idx >= 0)
2131 use_counter = 1;
2132 if (idx < 0)
2133 idx = 0;
2134
2135 _next_block:
2136 ke = pfr_kentry_byidx(kt, idx, af);
2137 if (ke == NULL)
2138 return (1);
2139 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2140 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2141 *rmask = SUNION2PF(&pfr_mask, af);
2142
2143 if (use_counter) {
2144 /* is supplied address within block? */
2145 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2146 /* no, go to next block in table */
2147 idx++;
2148 use_counter = 0;
2149 goto _next_block;
2150 }
2151 PF_ACPY(addr, counter, af);
2152 } else {
2153 /* use first address of block */
2154 PF_ACPY(addr, *raddr, af);
2155 }
2156
2157 if (!KENTRY_NETWORK(ke)) {
2158 /* this is a single IP address - no possible nested block */
2159 PF_ACPY(counter, addr, af);
2160 *pidx = idx;
2161 return (0);
2162 }
2163 for (;;) {
2164 /* we don't want to use a nested block */
2165 if (af == AF_INET)
2166 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2167 kt->pfrkt_ip4);
2168 else if (af == AF_INET6)
2169 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2170 kt->pfrkt_ip6);
2171 /* no need to check KENTRY_RNF_ROOT() here */
2172 if (ke2 == ke) {
2173 /* lookup return the same block - perfect */
2174 PF_ACPY(counter, addr, af);
2175 *pidx = idx;
2176 return (0);
2177 }
2178
2179 /* we need to increase the counter past the nested block */
2180 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2181 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2182 PF_AINC(addr, af);
2183 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2184 /* ok, we reached the end of our main block */
2185 /* go to next block in table */
2186 idx++;
2187 use_counter = 0;
2188 goto _next_block;
2189 }
2190 }
2191 }
2192
/*
 * Return the idx-th non-negated entry of the given address family,
 * using a PFRW_POOL_GET tree walk (see pfr_walktree()).  Returns
 * NULL when the index is out of range or the family is unsupported.
 */
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
2217
/*
 * Refresh a dynamic address (pfi_dynaddr) from the table's contents:
 * reset its v4/v6 address counts and walk the radix trees with
 * PFRW_DYNADDR_UPDATE, which stores the first matching entry's
 * address/mask per family.  Runs at splsoftnet so the packet path
 * never sees a half-updated dynaddr.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}
2237