/*	$OpenBSD: pf_table.c,v 1.62 2004/12/07 18:02:04 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

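/*
 * COPYIN/COPYOUT cross the user/kernel boundary only for requests that
 * came in through an ioctl (PFR_FLAG_USERIOCTL); for kernel-internal
 * callers the buffers already live in kernel space, so a plain bcopy()
 * is used instead.  Like copyin()/copyout(), both evaluate to 0 on
 * success.  Both rely on a 'flags' variable in the calling scope.
 */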
#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
	int	pfrw_flags;
};
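/*
 * Shorthands for the walker argument union.  Note that pfrw_cnt aliases
 * pfrw_free: buffer-filling walks (PFRW_GET_ADDRS, PFRW_GET_ASTATS)
 * count remaining slots down through pfrw_free, while PFRW_ENQUEUE and
 * PFRW_SWEEP count collected entries up through pfrw_cnt and
 * PFRW_POOL_GET counts pfrw_cnt down to the wanted index.  A single
 * walk only ever uses one of the two meanings.
 */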
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
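	/*
	 * Worked example: with N = 100000 addresses in the table, the
	 * loop below runs 17 times and leaves log at 18 (roughly
	 * 1 + log2(N)), so the O(N) full scan is chosen only when more
	 * than 100000/18 ~ 5555 addresses are to be deleted; below that
	 * threshold the per-address radix lookups win.
	 */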
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

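/*
 * Sanity-check a pfr_addr supplied by the caller: the prefix length
 * must fit the address family, every address bit beyond the prefix
 * must be zero (so 10.0.0.1/24 is rejected while 10.0.0.0/24 passes),
 * and the kernel-owned feedback field must be clear.
 */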
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

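/*
 * Build a netmask sockaddr for the given address family and prefix
 * length, e.g. (AF_INET, 20) yields 255.255.240.0 and (AF_INET6, 40)
 * yields ffff:ffff:ff00::.
 */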
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

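/*
 * Callback invoked for every node during a radix-tree walk; dispatches
 * on pfrw_op: clearing or collecting marks, enqueueing entries on a
 * workq, copying addresses or statistics out to the caller's buffer,
 * picking the n-th non-negated entry for the address pools, or
 * refreshing a dynamic address.
 */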
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

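/*
 * Swing an inactive shadow table into place.  Three cases: the shadow
 * carries no addresses (NO_ADDRESSES), so only the table stats are
 * touched; the live table is already active, so the two address sets
 * must be merged entry by entry; or the live table is inactive, so the
 * radix heads and counters can simply be swapped wholesale.
 */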
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
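/* For example, an anchor of "//foo/bar" is rewritten in place to "foo/bar". */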
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

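/*
 * Match an address against a table on the packet-filtering fast path.
 * An inactive table defers to its root table of the same name, if any;
 * negated ("!") entries satisfy the radix lookup but are counted as a
 * non-match.
 */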
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

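/*
 * Select the next address from a table used as a round-robin pool.
 * *pidx tracks the current block within the table and counter the last
 * address handed out; nested (more-specific) blocks are skipped so an
 * address is not handed out twice in one cycle.  Returns 0 on success,
 * 1 when the table is exhausted (the caller restarts at index 0), and
 * -1 if the table is not usable.  A sketch of a caller driving it (the
 * round-robin branch of pf_map_addr() works roughly like this):
 *
 *	if (pfr_pool_get(rpool->cur->addr.p.tbl, &rpool->tblidx,
 *	    &rpool->counter, &raddr, &rmask, af))
 *		return (1);	// pool exhausted, no address available
 */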
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}