/*	$NetBSD: pf_ioctl.c,v 1.27 2006/10/12 19:59:08 peter Exp $	*/
2 /* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_pfil_hooks.h"
42 #endif
43
44 #ifdef __OpenBSD__
45 #include "pfsync.h"
46 #else
47 #define NPFSYNC 0
48 #endif
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/filio.h>
54 #include <sys/fcntl.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/kernel.h>
58 #include <sys/time.h>
59 #ifdef __OpenBSD__
60 #include <sys/timeout.h>
61 #else
62 #include <sys/callout.h>
63 #endif
64 #include <sys/pool.h>
65 #include <sys/malloc.h>
66 #ifdef __NetBSD__
67 #include <sys/conf.h>
68 #include <sys/lwp.h>
69 #include <sys/kauth.h>
70 #endif
71
72 #include <net/if.h>
73 #include <net/if_types.h>
74 #include <net/route.h>
75
76 #include <netinet/in.h>
77 #include <netinet/in_var.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/ip.h>
80 #include <netinet/ip_var.h>
81 #include <netinet/ip_icmp.h>
82
83 #ifdef __OpenBSD__
84 #include <dev/rndvar.h>
85 #endif
86 #include <net/pfvar.h>
87
88 #if NPFSYNC > 0
89 #include <net/if_pfsync.h>
90 #endif /* NPFSYNC > 0 */
91
92 #ifdef INET6
93 #include <netinet/ip6.h>
94 #include <netinet/in_pcb.h>
95 #endif /* INET6 */
96
97 #ifdef ALTQ
98 #include <altq/altq.h>
99 #endif
100
/*
 * Forward declarations for the pf(4) pseudo-device entry points and
 * the file-local helpers defined below.
 */
void			 pfattach(int);
#ifdef _LKM
void			 pfdetach(void);
#endif
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);
int			 pf_get_ruleset_number(u_int8_t);
void			 pf_init_ruleset(struct pf_ruleset *);
int			 pf_anchor_setup(struct pf_rule *,
			    const struct pf_ruleset *, const char *);
int			 pf_anchor_copyout(const struct pf_ruleset *,
			    const struct pf_rule *, struct pfioc_rule *);
void			 pf_anchor_remove(struct pf_rule *);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_commit_rules(u_int32_t, int, char *);

#ifdef __NetBSD__
/* Character-device switch for /dev/pf: only open/close/ioctl are supported. */
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

/* Nonzero while pf's pfil(9) hooks are installed (see DIOCSTART). */
static int pf_pfil_attached = 0;
#endif

/* Periodic state/source-node expiry timer, armed in pfattach(). */
#ifdef __OpenBSD__
extern struct timeout	 pf_expire_to;
#else
extern struct callout	 pf_expire_to;
#endif

/* Catch-all pass rule used when no configured rule matches. */
struct pf_rule		 pf_default_rule;
#ifdef ALTQ
static int		 pf_altq_running;
#endif

#define	TAGID_MAX	 50000
/* Name<->id tables shared by packet tags and (under ALTQ) queue ids. */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
extern struct pfil_head if_pfil;
#endif
173
/*
 * One-time attach of the pf subsystem: create the memory pools, set up
 * the main ruleset and default rule, install the default timeouts and
 * arm the periodic expiry timer.  Called from autoconf/LKM attach.
 */
void
pfattach(int num __unused)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* Cap the number of states the state pool may hand out. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* Arm the purge timer; it re-arms itself from pf_purge_timeout(). */
#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
247
#ifdef _LKM
/*
 * Tear down the entire pf subsystem for LKM unload.  Order matters:
 * hooks come out first so no new packets enter pf, then rules, states,
 * source nodes, tables and anchors are flushed before the backing
 * pools and subsystems are destroyed.
 */
void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';	/* empty anchor path = main ruleset */

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif

	/* clear states: mark everything for immediate purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: drop state back-pointers, then expire nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: committing empty rulesets garbage-collects them */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
325
326 int
327 pfopen(dev_t dev, int flags __unused, int fmt __unused, struct lwp *l __unused)
328 {
329 if (minor(dev) >= 1)
330 return (ENXIO);
331 return (0);
332 }
333
334 int
335 pfclose(dev_t dev, int flags __unused, int fmt __unused, struct lwp *l __unused)
336 {
337 if (minor(dev) >= 1)
338 return (ENXIO);
339 return (0);
340 }
341
342 struct pf_pool *
343 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
344 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
345 u_int8_t check_ticket)
346 {
347 struct pf_ruleset *ruleset;
348 struct pf_rule *rule;
349 int rs_num;
350
351 ruleset = pf_find_ruleset(anchor);
352 if (ruleset == NULL)
353 return (NULL);
354 rs_num = pf_get_ruleset_number(rule_action);
355 if (rs_num >= PF_RULESET_MAX)
356 return (NULL);
357 if (active) {
358 if (check_ticket && ticket !=
359 ruleset->rules[rs_num].active.ticket)
360 return (NULL);
361 if (r_last)
362 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
363 pf_rulequeue);
364 else
365 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
366 } else {
367 if (check_ticket && ticket !=
368 ruleset->rules[rs_num].inactive.ticket)
369 return (NULL);
370 if (r_last)
371 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
372 pf_rulequeue);
373 else
374 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
375 }
376 if (!r_last) {
377 while ((rule != NULL) && (rule->nr != rule_number))
378 rule = TAILQ_NEXT(rule, entries);
379 }
380 if (rule == NULL)
381 return (NULL);
382
383 return (&rule->rpool);
384 }
385
386 int
387 pf_get_ruleset_number(u_int8_t action)
388 {
389 switch (action) {
390 case PF_SCRUB:
391 case PF_NOSCRUB:
392 return (PF_RULESET_SCRUB);
393 break;
394 case PF_PASS:
395 case PF_DROP:
396 return (PF_RULESET_FILTER);
397 break;
398 case PF_NAT:
399 case PF_NONAT:
400 return (PF_RULESET_NAT);
401 break;
402 case PF_BINAT:
403 case PF_NOBINAT:
404 return (PF_RULESET_BINAT);
405 break;
406 case PF_RDR:
407 case PF_NORDR:
408 return (PF_RULESET_RDR);
409 break;
410 default:
411 return (PF_RULESET_MAX);
412 break;
413 }
414 }
415
416 void
417 pf_init_ruleset(struct pf_ruleset *ruleset)
418 {
419 int i;
420
421 memset(ruleset, 0, sizeof(struct pf_ruleset));
422 for (i = 0; i < PF_RULESET_MAX; i++) {
423 TAILQ_INIT(&ruleset->rules[i].queues[0]);
424 TAILQ_INIT(&ruleset->rules[i].queues[1]);
425 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
426 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
427 }
428 }
429
/*
 * Look up an anchor by its full path in the global anchor tree.
 *
 * The search key is static, presumably to keep the large struct off
 * the kernel stack; NOTE(review): that makes this function
 * non-reentrant — confirm callers are serialized (e.g. at splsoftnet).
 */
struct pf_anchor *
pf_find_anchor(const char *path)
{
	static struct pf_anchor	 key;

	memset(&key, 0, sizeof(key));
	strlcpy(key.path, path, sizeof(key.path));
	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
}
439
440 struct pf_ruleset *
441 pf_find_ruleset(const char *path)
442 {
443 struct pf_anchor *anchor;
444
445 while (*path == '/')
446 path++;
447 if (!*path)
448 return (&pf_main_ruleset);
449 anchor = pf_find_anchor(path);
450 if (anchor == NULL)
451 return (NULL);
452 else
453 return (&anchor->ruleset);
454 }
455
/*
 * Resolve an anchor path to its ruleset, creating any missing anchors
 * along the way.  First the deepest existing ancestor is located by
 * stripping path components from the right; then the remaining
 * components are created left to right, each linked into both the
 * global anchor tree and its parent's children tree.
 *
 * Returns NULL on malformed paths, oversized components or allocation
 * failure.  The static buffer `p' keeps MAXPATHLEN off the kernel
 * stack; NOTE(review): this makes the function non-reentrant — confirm
 * callers are serialized.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL /* XXX gcc */, *r;
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL /* XXX gcc */,
				*dup, *parent = NULL;

	while (*path == '/')
		path++;
	/* Fast path: the whole anchor chain already exists. */
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	strlcpy(p, path, sizeof(p));
	/* Strip components off the tail until an existing ancestor is found. */
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now points at the first component that must be created. */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* Create each remaining component, deepest-existing parent first. */
	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL)
			*r = 0;
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			/* Second insert; roll back the global insert on clash. */
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;	/* last component done; terminate the loop */
	}
	return (&anchor->ruleset);
}
537
/*
 * Garbage-collect an anchor's ruleset once it is completely unused,
 * then walk up and try the same on each parent, since removing a child
 * may have made the parent collectable.  The main ruleset and any
 * ruleset that still has children, references, tables, an open table
 * transaction, rules or an open rule transaction are left alone.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* Fully empty: unlink from both trees and free the anchor. */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		ruleset = &parent->ruleset;
	}
}
565
/*
 * Attach rule `r' (being loaded into ruleset `s') to the anchor named
 * by `name'.  Absolute names (leading '/') are used as-is; relative
 * names are resolved against s's anchor path, with each leading "../"
 * stripping one component.  A trailing "/*" requests wildcard
 * (recursive) evaluation.  The target ruleset is created on demand and
 * its anchor's refcount bumped.  Returns 0 on success, 1 on error.
 *
 * The static path buffer avoids a MAXPATHLEN stack allocation;
 * NOTE(review): non-reentrant — confirm callers are serialized.
 */
int
pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
    const char *name)
{
	static char		*p, path[MAXPATHLEN];
	struct pf_ruleset	*ruleset;

	r->anchor = NULL;
	r->anchor_relative = 0;
	r->anchor_wildcard = 0;
	if (!name[0])
		return (0);
	if (name[0] == '/')
		strlcpy(path, name + 1, sizeof(path));
	else {
		/* relative path */
		r->anchor_relative = 1;
		if (s->anchor == NULL || !s->anchor->path[0])
			path[0] = 0;
		else
			strlcpy(path, s->anchor->path, sizeof(path));
		/* Consume "../" prefixes, walking up from s's anchor. */
		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
			if (!path[0]) {
				printf("pf_anchor_setup: .. beyond root\n");
				return (1);
			}
			if ((p = strrchr(path, '/')) != NULL)
				*p = 0;
			else
				path[0] = 0;
			r->anchor_relative++;
			name += 3;
		}
		if (path[0])
			strlcat(path, "/", sizeof(path));
		strlcat(path, name, sizeof(path));
	}
	/* A trailing "/*" means wildcard: evaluate all children. */
	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
		r->anchor_wildcard = 1;
		*p = 0;
	}
	ruleset = pf_find_or_create_ruleset(path);
	if (ruleset == NULL || ruleset->anchor == NULL) {
		printf("pf_anchor_setup: ruleset\n");
		return (1);
	}
	r->anchor = ruleset->anchor;
	r->anchor->refcnt++;
	return (0);
}
616
/*
 * Reconstruct the anchor-call string of rule `r' (which lives in
 * ruleset `rs') for copyout to userland: absolute anchors get a
 * leading '/', relative ones are rebuilt as "../" repeated
 * (anchor_relative - 1) times plus the path suffix below the common
 * ancestor, and a wildcard anchor gets a trailing "*".
 * Returns 0 on success, 1 if the stored paths are inconsistent.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char	 a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int	 i;

		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* Strip one component of `a' per "../" level, emitting "../". */
		for (i = 1; i < r->anchor_relative; ++i) {
			if ((p = strrchr(a, '/')) == NULL)
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* After stripping, `a' must be a prefix of `b'. */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
657
658 void
659 pf_anchor_remove(struct pf_rule *r)
660 {
661 if (r->anchor == NULL)
662 return;
663 if (r->anchor->refcnt <= 0) {
664 printf("pf_anchor_remove: broken refcount");
665 r->anchor = NULL;
666 return;
667 }
668 if (!--r->anchor->refcnt)
669 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
670 r->anchor = NULL;
671 }
672
673 void
674 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
675 {
676 struct pf_pooladdr *mv_pool_pa;
677
678 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
679 TAILQ_REMOVE(poola, mv_pool_pa, entries);
680 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
681 }
682 }
683
684 void
685 pf_empty_pool(struct pf_palist *poola)
686 {
687 struct pf_pooladdr *empty_pool_pa;
688
689 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
690 pfi_dynaddr_remove(&empty_pool_pa->addr);
691 pf_tbladdr_remove(&empty_pool_pa->addr);
692 pfi_detach_rule(empty_pool_pa->kif);
693 TAILQ_REMOVE(poola, empty_pool_pa, entries);
694 pool_put(&pf_pooladdr_pl, empty_pool_pa);
695 }
696 }
697
/*
 * Unlink `rule' from `rulequeue' (if given) and free it once nothing
 * references it anymore.  A rule that still owns states or source
 * nodes, or is still queued, is only unlinked here; the final free
 * happens on a later call with rulequeue == NULL once those references
 * drain.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as no longer queued. */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Defer the free while states, source nodes or a queue hold it. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	/* pqid == qid means a single shared reference; don't drop it twice. */
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Tables were not detached on the earlier unlink; do it now. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_detach_rule(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
743
/*
 * Return the numeric tag for `tagname', allocating a new one if the
 * name is unknown.  An existing entry just gains a reference.  New ids
 * are the lowest unused value in [1, TAGID_MAX]; the list is kept
 * sorted by tag id, which the free-slot scan below relies on.
 * Returns 0 on failure (id space exhausted or allocation failure).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* Scan the sorted list; stop at the first gap in the ids. */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	/* Keep the list sorted: fill the gap before p, or append. */
	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
788
789 static void
790 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
791 {
792 struct pf_tagname *tag;
793
794 TAILQ_FOREACH(tag, head, entries)
795 if (tag->tag == tagid) {
796 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
797 return;
798 }
799 }
800
801 static void
802 tag_unref(struct pf_tags *head, u_int16_t tag)
803 {
804 struct pf_tagname *p, *next;
805
806 if (tag == 0)
807 return;
808
809 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
810 next = TAILQ_NEXT(p, entries);
811 if (tag == p->tag) {
812 if (--p->ref == 0) {
813 TAILQ_REMOVE(head, p, entries);
814 free(p, M_TEMP);
815 }
816 break;
817 }
818 }
819 }
820
821 u_int16_t
822 pf_tagname2tag(char *tagname)
823 {
824 return (tagname2tag(&pf_tags, tagname));
825 }
826
827 void
828 pf_tag2tagname(u_int16_t tagid, char *p)
829 {
830 return (tag2tagname(&pf_tags, tagid, p));
831 }
832
833 void
834 pf_tag_ref(u_int16_t tag)
835 {
836 struct pf_tagname *t;
837
838 TAILQ_FOREACH(t, &pf_tags, entries)
839 if (t->tag == tag)
840 break;
841 if (t != NULL)
842 t->ref++;
843 }
844
845 void
846 pf_tag_unref(u_int16_t tag)
847 {
848 return (tag_unref(&pf_tags, tag));
849 }
850
/*
 * Resolve an address wrap's route-label name to its id.  Route labels
 * exist only on OpenBSD; elsewhere this is a no-op returning success.
 * Returns -1 when the label cannot be resolved.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif
	return (0);
}
861
/*
 * Release the route-label reference held by an address wrap.
 * No-op outside OpenBSD.
 */
void
pf_rtlabel_remove(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}
870
/*
 * Translate a route-label id back into its name for copyout to
 * userland; "?" is used when the id no longer resolves.  No-op outside
 * OpenBSD.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}
887
888 #ifdef ALTQ
889 u_int32_t
890 pf_qname2qid(char *qname)
891 {
892 return ((u_int32_t)tagname2tag(&pf_qids, qname));
893 }
894
895 void
896 pf_qid2qname(u_int32_t qid, char *p)
897 {
898 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
899 }
900
901 void
902 pf_qid_unref(u_int32_t qid)
903 {
904 return (tag_unref(&pf_qids, (u_int16_t)qid));
905 }
906
907 int
908 pf_begin_altq(u_int32_t *ticket)
909 {
910 struct pf_altq *altq;
911 int error = 0;
912
913 /* Purge the old altq list */
914 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
915 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
916 if (altq->qname[0] == 0) {
917 /* detach and destroy the discipline */
918 error = altq_remove(altq);
919 } else
920 pf_qid_unref(altq->qid);
921 pool_put(&pf_altq_pl, altq);
922 }
923 if (error)
924 return (error);
925 *ticket = ++ticket_altqs_inactive;
926 altqs_inactive_open = 1;
927 return (0);
928 }
929
930 int
931 pf_rollback_altq(u_int32_t ticket)
932 {
933 struct pf_altq *altq;
934 int error = 0;
935
936 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
937 return (0);
938 /* Purge the old altq list */
939 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
940 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
941 if (altq->qname[0] == 0) {
942 /* detach and destroy the discipline */
943 error = altq_remove(altq);
944 } else
945 pf_qid_unref(altq->qid);
946 pool_put(&pf_altq_pl, altq);
947 }
948 altqs_inactive_open = 0;
949 return (error);
950 }
951
/*
 * Commit an ALTQ transaction: atomically (at splsoftnet) swap the
 * active and inactive queue lists, attach and optionally enable the
 * new disciplines, then detach and destroy the old ones.  Returns
 * EBUSY on a stale ticket, otherwise the first error encountered.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		/* Empty qname marks a root discipline entry, not a queue. */
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list; keep the first error but finish the walk. */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1005
1006 int
1007 pf_enable_altq(struct pf_altq *altq)
1008 {
1009 struct ifnet *ifp;
1010 struct tb_profile tb;
1011 int s, error = 0;
1012
1013 if ((ifp = ifunit(altq->ifname)) == NULL)
1014 return (EINVAL);
1015
1016 if (ifp->if_snd.altq_type != ALTQT_NONE)
1017 error = altq_enable(&ifp->if_snd);
1018
1019 /* set tokenbucket regulator */
1020 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1021 tb.rate = altq->ifbandwidth;
1022 tb.depth = altq->tbrsize;
1023 #ifdef __OpenBSD__
1024 s = splimp();
1025 #else
1026 s = splnet();
1027 #endif
1028 error = tbr_set(&ifp->if_snd, &tb);
1029 splx(s);
1030 }
1031
1032 return (error);
1033 }
1034
/*
 * Disable queueing on the interface named in `altq' and clear its
 * token-bucket regulator.  Returns EINVAL when the interface does not
 * exist, 0 if the discipline was already replaced by a newer one.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/*
		 * clear tokenbucket regulator
		 * NOTE(review): only tb.rate is set here; tb.depth is left
		 * uninitialized — presumably tbr_set() ignores depth when
		 * rate == 0, but confirm against the ALTQ implementation.
		 */
		tb.rate = 0;
#ifdef __OpenBSD__
		s = splimp();
#else
		s = splnet();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1068 #endif /* ALTQ */
1069
1070 int
1071 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1072 {
1073 struct pf_ruleset *rs;
1074 struct pf_rule *rule;
1075
1076 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1077 return (EINVAL);
1078 rs = pf_find_or_create_ruleset(anchor);
1079 if (rs == NULL)
1080 return (EINVAL);
1081 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1082 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1083 *ticket = ++rs->rules[rs_num].inactive.ticket;
1084 rs->rules[rs_num].inactive.open = 1;
1085 return (0);
1086 }
1087
1088 int
1089 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1090 {
1091 struct pf_ruleset *rs;
1092 struct pf_rule *rule;
1093
1094 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1095 return (EINVAL);
1096 rs = pf_find_ruleset(anchor);
1097 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1098 rs->rules[rs_num].inactive.ticket != ticket)
1099 return (0);
1100 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1101 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1102 rs->rules[rs_num].inactive.open = 0;
1103 return (0);
1104 }
1105
/*
 * Commit a rules transaction: atomically (at splsoftnet) swap the
 * active and inactive rule queues of one ruleset type, recompute the
 * skip steps, purge the replaced rules and garbage-collect the ruleset
 * if the swap left it empty.  Returns EBUSY on a stale ticket.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
1139
1140 int
1141 pfioctl(dev_t dev __unused, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1142 {
1143 struct pf_pooladdr *pa = NULL;
1144 struct pf_pool *pool = NULL;
1145 int s;
1146 int error = 0;
1147
1148 /* XXX keep in sync with switch() below */
1149 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1150 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1151 switch (cmd) {
1152 case DIOCGETRULES:
1153 case DIOCGETRULE:
1154 case DIOCGETADDRS:
1155 case DIOCGETADDR:
1156 case DIOCGETSTATE:
1157 case DIOCSETSTATUSIF:
1158 case DIOCGETSTATUS:
1159 case DIOCCLRSTATUS:
1160 case DIOCNATLOOK:
1161 case DIOCSETDEBUG:
1162 case DIOCGETSTATES:
1163 case DIOCGETTIMEOUT:
1164 case DIOCCLRRULECTRS:
1165 case DIOCGETLIMIT:
1166 case DIOCGETALTQS:
1167 case DIOCGETALTQ:
1168 case DIOCGETQSTATS:
1169 case DIOCGETRULESETS:
1170 case DIOCGETRULESET:
1171 case DIOCRGETTABLES:
1172 case DIOCRGETTSTATS:
1173 case DIOCRCLRTSTATS:
1174 case DIOCRCLRADDRS:
1175 case DIOCRADDADDRS:
1176 case DIOCRDELADDRS:
1177 case DIOCRSETADDRS:
1178 case DIOCRGETADDRS:
1179 case DIOCRGETASTATS:
1180 case DIOCRCLRASTATS:
1181 case DIOCRTSTADDRS:
1182 case DIOCOSFPGET:
1183 case DIOCGETSRCNODES:
1184 case DIOCCLRSRCNODES:
1185 case DIOCIGETIFACES:
1186 case DIOCICLRISTATS:
1187 case DIOCSETIFFLAG:
1188 case DIOCCLRIFFLAG:
1189 break;
1190 case DIOCRCLRTABLES:
1191 case DIOCRADDTABLES:
1192 case DIOCRDELTABLES:
1193 case DIOCRSETTFLAGS:
1194 if (((struct pfioc_table *)addr)->pfrio_flags &
1195 PFR_FLAG_DUMMY)
1196 break; /* dummy operation ok */
1197 return (EPERM);
1198 default:
1199 return (EPERM);
1200 }
1201
1202 if (!(flags & FWRITE))
1203 switch (cmd) {
1204 case DIOCGETRULES:
1205 case DIOCGETRULE:
1206 case DIOCGETADDRS:
1207 case DIOCGETADDR:
1208 case DIOCGETSTATE:
1209 case DIOCGETSTATUS:
1210 case DIOCGETSTATES:
1211 case DIOCGETTIMEOUT:
1212 case DIOCGETLIMIT:
1213 case DIOCGETALTQS:
1214 case DIOCGETALTQ:
1215 case DIOCGETQSTATS:
1216 case DIOCGETRULESETS:
1217 case DIOCGETRULESET:
1218 case DIOCRGETTABLES:
1219 case DIOCRGETTSTATS:
1220 case DIOCRGETADDRS:
1221 case DIOCRGETASTATS:
1222 case DIOCRTSTADDRS:
1223 case DIOCOSFPGET:
1224 case DIOCGETSRCNODES:
1225 case DIOCIGETIFACES:
1226 break;
1227 case DIOCRCLRTABLES:
1228 case DIOCRADDTABLES:
1229 case DIOCRDELTABLES:
1230 case DIOCRCLRTSTATS:
1231 case DIOCRCLRADDRS:
1232 case DIOCRADDADDRS:
1233 case DIOCRDELADDRS:
1234 case DIOCRSETADDRS:
1235 case DIOCRSETTFLAGS:
1236 if (((struct pfioc_table *)addr)->pfrio_flags &
1237 PFR_FLAG_DUMMY)
1238 break; /* dummy operation ok */
1239 return (EACCES);
1240 default:
1241 return (EACCES);
1242 }
1243
1244 s = splsoftnet();
1245 switch (cmd) {
1246
1247 case DIOCSTART:
1248 if (pf_status.running)
1249 error = EEXIST;
1250 else {
1251 #ifdef __NetBSD__
1252 error = pf_pfil_attach();
1253 if (error)
1254 break;
1255 #endif
1256 pf_status.running = 1;
1257 pf_status.since = time_second;
1258 if (pf_status.stateid == 0) {
1259 pf_status.stateid = time_second;
1260 pf_status.stateid = pf_status.stateid << 32;
1261 }
1262 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1263 }
1264 break;
1265
1266 case DIOCSTOP:
1267 if (!pf_status.running)
1268 error = ENOENT;
1269 else {
1270 #ifdef __NetBSD__
1271 error = pf_pfil_detach();
1272 if (error)
1273 break;
1274 #endif
1275 pf_status.running = 0;
1276 pf_status.since = time_second;
1277 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1278 }
1279 break;
1280
1281 case DIOCADDRULE: {
1282 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1283 struct pf_ruleset *ruleset;
1284 struct pf_rule *rule, *tail;
1285 struct pf_pooladdr *pa;
1286 int rs_num;
1287
1288 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1289 ruleset = pf_find_ruleset(pr->anchor);
1290 if (ruleset == NULL) {
1291 error = EINVAL;
1292 break;
1293 }
1294 rs_num = pf_get_ruleset_number(pr->rule.action);
1295 if (rs_num >= PF_RULESET_MAX) {
1296 error = EINVAL;
1297 break;
1298 }
1299 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1300 error = EINVAL;
1301 break;
1302 }
1303 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1304 error = EBUSY;
1305 break;
1306 }
1307 if (pr->pool_ticket != ticket_pabuf) {
1308 error = EBUSY;
1309 break;
1310 }
1311 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1312 if (rule == NULL) {
1313 error = ENOMEM;
1314 break;
1315 }
1316 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1317 rule->anchor = NULL;
1318 rule->kif = NULL;
1319 TAILQ_INIT(&rule->rpool.list);
1320 /* initialize refcounting */
1321 rule->states = 0;
1322 rule->src_nodes = 0;
1323 rule->entries.tqe_prev = NULL;
1324 #ifndef INET
1325 if (rule->af == AF_INET) {
1326 pool_put(&pf_rule_pl, rule);
1327 error = EAFNOSUPPORT;
1328 break;
1329 }
1330 #endif /* INET */
1331 #ifndef INET6
1332 if (rule->af == AF_INET6) {
1333 pool_put(&pf_rule_pl, rule);
1334 error = EAFNOSUPPORT;
1335 break;
1336 }
1337 #endif /* INET6 */
1338 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1339 pf_rulequeue);
1340 if (tail)
1341 rule->nr = tail->nr + 1;
1342 else
1343 rule->nr = 0;
1344 if (rule->ifname[0]) {
1345 rule->kif = pfi_attach_rule(rule->ifname);
1346 if (rule->kif == NULL) {
1347 pool_put(&pf_rule_pl, rule);
1348 error = EINVAL;
1349 break;
1350 }
1351 }
1352
1353 #ifdef ALTQ
1354 /* set queue IDs */
1355 if (rule->qname[0] != 0) {
1356 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1357 error = EBUSY;
1358 else if (rule->pqname[0] != 0) {
1359 if ((rule->pqid =
1360 pf_qname2qid(rule->pqname)) == 0)
1361 error = EBUSY;
1362 } else
1363 rule->pqid = rule->qid;
1364 }
1365 #endif
1366 if (rule->tagname[0])
1367 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1368 error = EBUSY;
1369 if (rule->match_tagname[0])
1370 if ((rule->match_tag =
1371 pf_tagname2tag(rule->match_tagname)) == 0)
1372 error = EBUSY;
1373 if (rule->rt && !rule->direction)
1374 error = EINVAL;
1375 if (pf_rtlabel_add(&rule->src.addr) ||
1376 pf_rtlabel_add(&rule->dst.addr))
1377 error = EBUSY;
1378 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1379 error = EINVAL;
1380 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1381 error = EINVAL;
1382 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1383 error = EINVAL;
1384 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1385 error = EINVAL;
1386 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1387 error = EINVAL;
1388 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1389 if (pf_tbladdr_setup(ruleset, &pa->addr))
1390 error = EINVAL;
1391
1392 if (rule->overload_tblname[0]) {
1393 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1394 rule->overload_tblname)) == NULL)
1395 error = EINVAL;
1396 else
1397 rule->overload_tbl->pfrkt_flags |=
1398 PFR_TFLAG_ACTIVE;
1399 }
1400
1401 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1402 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1403 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1404 (rule->rt > PF_FASTROUTE)) &&
1405 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1406 error = EINVAL;
1407
1408 if (error) {
1409 pf_rm_rule(NULL, rule);
1410 break;
1411 }
1412 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1413 rule->evaluations = rule->packets = rule->bytes = 0;
1414 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1415 rule, entries);
1416 break;
1417 }
1418
1419 case DIOCGETRULES: {
1420 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1421 struct pf_ruleset *ruleset;
1422 struct pf_rule *tail;
1423 int rs_num;
1424
1425 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1426 ruleset = pf_find_ruleset(pr->anchor);
1427 if (ruleset == NULL) {
1428 error = EINVAL;
1429 break;
1430 }
1431 rs_num = pf_get_ruleset_number(pr->rule.action);
1432 if (rs_num >= PF_RULESET_MAX) {
1433 error = EINVAL;
1434 break;
1435 }
1436 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1437 pf_rulequeue);
1438 if (tail)
1439 pr->nr = tail->nr + 1;
1440 else
1441 pr->nr = 0;
1442 pr->ticket = ruleset->rules[rs_num].active.ticket;
1443 break;
1444 }
1445
1446 case DIOCGETRULE: {
1447 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1448 struct pf_ruleset *ruleset;
1449 struct pf_rule *rule;
1450 int rs_num, i;
1451
1452 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1453 ruleset = pf_find_ruleset(pr->anchor);
1454 if (ruleset == NULL) {
1455 error = EINVAL;
1456 break;
1457 }
1458 rs_num = pf_get_ruleset_number(pr->rule.action);
1459 if (rs_num >= PF_RULESET_MAX) {
1460 error = EINVAL;
1461 break;
1462 }
1463 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1464 error = EBUSY;
1465 break;
1466 }
1467 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1468 while ((rule != NULL) && (rule->nr != pr->nr))
1469 rule = TAILQ_NEXT(rule, entries);
1470 if (rule == NULL) {
1471 error = EBUSY;
1472 break;
1473 }
1474 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1475 if (pf_anchor_copyout(ruleset, rule, pr)) {
1476 error = EBUSY;
1477 break;
1478 }
1479 pfi_dynaddr_copyout(&pr->rule.src.addr);
1480 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1481 pf_tbladdr_copyout(&pr->rule.src.addr);
1482 pf_tbladdr_copyout(&pr->rule.dst.addr);
1483 pf_rtlabel_copyout(&pr->rule.src.addr);
1484 pf_rtlabel_copyout(&pr->rule.dst.addr);
1485 for (i = 0; i < PF_SKIP_COUNT; ++i)
1486 if (rule->skip[i].ptr == NULL)
1487 pr->rule.skip[i].nr = -1;
1488 else
1489 pr->rule.skip[i].nr =
1490 rule->skip[i].ptr->nr;
1491 break;
1492 }
1493
1494 case DIOCCHANGERULE: {
1495 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1496 struct pf_ruleset *ruleset;
1497 struct pf_rule *oldrule = NULL, *newrule = NULL;
1498 u_int32_t nr = 0;
1499 int rs_num;
1500
1501 if (!(pcr->action == PF_CHANGE_REMOVE ||
1502 pcr->action == PF_CHANGE_GET_TICKET) &&
1503 pcr->pool_ticket != ticket_pabuf) {
1504 error = EBUSY;
1505 break;
1506 }
1507
1508 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1509 pcr->action > PF_CHANGE_GET_TICKET) {
1510 error = EINVAL;
1511 break;
1512 }
1513 ruleset = pf_find_ruleset(pcr->anchor);
1514 if (ruleset == NULL) {
1515 error = EINVAL;
1516 break;
1517 }
1518 rs_num = pf_get_ruleset_number(pcr->rule.action);
1519 if (rs_num >= PF_RULESET_MAX) {
1520 error = EINVAL;
1521 break;
1522 }
1523
1524 if (pcr->action == PF_CHANGE_GET_TICKET) {
1525 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1526 break;
1527 } else {
1528 if (pcr->ticket !=
1529 ruleset->rules[rs_num].active.ticket) {
1530 error = EINVAL;
1531 break;
1532 }
1533 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1534 error = EINVAL;
1535 break;
1536 }
1537 }
1538
1539 if (pcr->action != PF_CHANGE_REMOVE) {
1540 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1541 if (newrule == NULL) {
1542 error = ENOMEM;
1543 break;
1544 }
1545 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1546 TAILQ_INIT(&newrule->rpool.list);
1547 /* initialize refcounting */
1548 newrule->states = 0;
1549 newrule->entries.tqe_prev = NULL;
1550 #ifndef INET
1551 if (newrule->af == AF_INET) {
1552 pool_put(&pf_rule_pl, newrule);
1553 error = EAFNOSUPPORT;
1554 break;
1555 }
1556 #endif /* INET */
1557 #ifndef INET6
1558 if (newrule->af == AF_INET6) {
1559 pool_put(&pf_rule_pl, newrule);
1560 error = EAFNOSUPPORT;
1561 break;
1562 }
1563 #endif /* INET6 */
1564 if (newrule->ifname[0]) {
1565 newrule->kif = pfi_attach_rule(newrule->ifname);
1566 if (newrule->kif == NULL) {
1567 pool_put(&pf_rule_pl, newrule);
1568 error = EINVAL;
1569 break;
1570 }
1571 } else
1572 newrule->kif = NULL;
1573
1574 #ifdef ALTQ
1575 /* set queue IDs */
1576 if (newrule->qname[0] != 0) {
1577 if ((newrule->qid =
1578 pf_qname2qid(newrule->qname)) == 0)
1579 error = EBUSY;
1580 else if (newrule->pqname[0] != 0) {
1581 if ((newrule->pqid =
1582 pf_qname2qid(newrule->pqname)) == 0)
1583 error = EBUSY;
1584 } else
1585 newrule->pqid = newrule->qid;
1586 }
1587 #endif /* ALTQ */
1588 if (newrule->tagname[0])
1589 if ((newrule->tag =
1590 pf_tagname2tag(newrule->tagname)) == 0)
1591 error = EBUSY;
1592 if (newrule->match_tagname[0])
1593 if ((newrule->match_tag = pf_tagname2tag(
1594 newrule->match_tagname)) == 0)
1595 error = EBUSY;
1596 if (newrule->rt && !newrule->direction)
1597 error = EINVAL;
1598 if (pf_rtlabel_add(&newrule->src.addr) ||
1599 pf_rtlabel_add(&newrule->dst.addr))
1600 error = EBUSY;
1601 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1602 error = EINVAL;
1603 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1604 error = EINVAL;
1605 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1606 error = EINVAL;
1607 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1608 error = EINVAL;
1609 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1610 error = EINVAL;
1611
1612 if (newrule->overload_tblname[0]) {
1613 if ((newrule->overload_tbl = pfr_attach_table(
1614 ruleset, newrule->overload_tblname)) ==
1615 NULL)
1616 error = EINVAL;
1617 else
1618 newrule->overload_tbl->pfrkt_flags |=
1619 PFR_TFLAG_ACTIVE;
1620 }
1621
1622 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1623 if (((((newrule->action == PF_NAT) ||
1624 (newrule->action == PF_RDR) ||
1625 (newrule->action == PF_BINAT) ||
1626 (newrule->rt > PF_FASTROUTE)) &&
1627 !pcr->anchor[0])) &&
1628 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1629 error = EINVAL;
1630
1631 if (error) {
1632 pf_rm_rule(NULL, newrule);
1633 break;
1634 }
1635 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1636 newrule->evaluations = newrule->packets = 0;
1637 newrule->bytes = 0;
1638 }
1639 pf_empty_pool(&pf_pabuf);
1640
1641 if (pcr->action == PF_CHANGE_ADD_HEAD)
1642 oldrule = TAILQ_FIRST(
1643 ruleset->rules[rs_num].active.ptr);
1644 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1645 oldrule = TAILQ_LAST(
1646 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1647 else {
1648 oldrule = TAILQ_FIRST(
1649 ruleset->rules[rs_num].active.ptr);
1650 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1651 oldrule = TAILQ_NEXT(oldrule, entries);
1652 if (oldrule == NULL) {
1653 if (newrule != NULL)
1654 pf_rm_rule(NULL, newrule);
1655 error = EINVAL;
1656 break;
1657 }
1658 }
1659
1660 if (pcr->action == PF_CHANGE_REMOVE)
1661 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1662 else {
1663 if (oldrule == NULL)
1664 TAILQ_INSERT_TAIL(
1665 ruleset->rules[rs_num].active.ptr,
1666 newrule, entries);
1667 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1668 pcr->action == PF_CHANGE_ADD_BEFORE)
1669 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1670 else
1671 TAILQ_INSERT_AFTER(
1672 ruleset->rules[rs_num].active.ptr,
1673 oldrule, newrule, entries);
1674 }
1675
1676 nr = 0;
1677 TAILQ_FOREACH(oldrule,
1678 ruleset->rules[rs_num].active.ptr, entries)
1679 oldrule->nr = nr++;
1680
1681 ruleset->rules[rs_num].active.ticket++;
1682
1683 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1684 pf_remove_if_empty_ruleset(ruleset);
1685
1686 break;
1687 }
1688
1689 case DIOCCLRSTATES: {
1690 struct pf_state *state;
1691 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1692 int killed = 0;
1693
1694 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1695 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1696 state->u.s.kif->pfik_name)) {
1697 state->timeout = PFTM_PURGE;
1698 #if NPFSYNC
1699 /* don't send out individual delete messages */
1700 state->sync_flags = PFSTATE_NOSYNC;
1701 #endif
1702 killed++;
1703 }
1704 }
1705 pf_purge_expired_states();
1706 pf_status.states = 0;
1707 psk->psk_af = killed;
1708 #if NPFSYNC
1709 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1710 #endif
1711 break;
1712 }
1713
1714 case DIOCKILLSTATES: {
1715 struct pf_state *state;
1716 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1717 int killed = 0;
1718
1719 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1720 if ((!psk->psk_af || state->af == psk->psk_af)
1721 && (!psk->psk_proto || psk->psk_proto ==
1722 state->proto) &&
1723 PF_MATCHA(psk->psk_src.neg,
1724 &psk->psk_src.addr.v.a.addr,
1725 &psk->psk_src.addr.v.a.mask,
1726 &state->lan.addr, state->af) &&
1727 PF_MATCHA(psk->psk_dst.neg,
1728 &psk->psk_dst.addr.v.a.addr,
1729 &psk->psk_dst.addr.v.a.mask,
1730 &state->ext.addr, state->af) &&
1731 (psk->psk_src.port_op == 0 ||
1732 pf_match_port(psk->psk_src.port_op,
1733 psk->psk_src.port[0], psk->psk_src.port[1],
1734 state->lan.port)) &&
1735 (psk->psk_dst.port_op == 0 ||
1736 pf_match_port(psk->psk_dst.port_op,
1737 psk->psk_dst.port[0], psk->psk_dst.port[1],
1738 state->ext.port)) &&
1739 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1740 state->u.s.kif->pfik_name))) {
1741 state->timeout = PFTM_PURGE;
1742 killed++;
1743 }
1744 }
1745 pf_purge_expired_states();
1746 psk->psk_af = killed;
1747 break;
1748 }
1749
1750 case DIOCADDSTATE: {
1751 struct pfioc_state *ps = (struct pfioc_state *)addr;
1752 struct pf_state *state;
1753 struct pfi_kif *kif;
1754
1755 if (ps->state.timeout >= PFTM_MAX &&
1756 ps->state.timeout != PFTM_UNTIL_PACKET) {
1757 error = EINVAL;
1758 break;
1759 }
1760 state = pool_get(&pf_state_pl, PR_NOWAIT);
1761 if (state == NULL) {
1762 error = ENOMEM;
1763 break;
1764 }
1765 kif = pfi_lookup_create(ps->state.u.ifname);
1766 if (kif == NULL) {
1767 pool_put(&pf_state_pl, state);
1768 error = ENOENT;
1769 break;
1770 }
1771 bcopy(&ps->state, state, sizeof(struct pf_state));
1772 bzero(&state->u, sizeof(state->u));
1773 state->rule.ptr = &pf_default_rule;
1774 state->nat_rule.ptr = NULL;
1775 state->anchor.ptr = NULL;
1776 state->rt_kif = NULL;
1777 state->creation = time_second;
1778 state->pfsync_time = 0;
1779 state->packets[0] = state->packets[1] = 0;
1780 state->bytes[0] = state->bytes[1] = 0;
1781
1782 if (pf_insert_state(kif, state)) {
1783 pfi_maybe_destroy(kif);
1784 pool_put(&pf_state_pl, state);
1785 error = ENOMEM;
1786 }
1787 break;
1788 }
1789
1790 case DIOCGETSTATE: {
1791 struct pfioc_state *ps = (struct pfioc_state *)addr;
1792 struct pf_state *state;
1793 u_int32_t nr;
1794
1795 nr = 0;
1796 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1797 if (nr >= ps->nr)
1798 break;
1799 nr++;
1800 }
1801 if (state == NULL) {
1802 error = EBUSY;
1803 break;
1804 }
1805 bcopy(state, &ps->state, sizeof(struct pf_state));
1806 ps->state.rule.nr = state->rule.ptr->nr;
1807 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1808 -1 : state->nat_rule.ptr->nr;
1809 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1810 -1 : state->anchor.ptr->nr;
1811 ps->state.expire = pf_state_expires(state);
1812 if (ps->state.expire > time_second)
1813 ps->state.expire -= time_second;
1814 else
1815 ps->state.expire = 0;
1816 break;
1817 }
1818
1819 case DIOCGETSTATES: {
1820 struct pfioc_states *ps = (struct pfioc_states *)addr;
1821 struct pf_state *state;
1822 struct pf_state *p, pstore;
1823 struct pfi_kif *kif;
1824 u_int32_t nr = 0;
1825 int space = ps->ps_len;
1826
1827 if (space == 0) {
1828 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1829 nr += kif->pfik_states;
1830 ps->ps_len = sizeof(struct pf_state) * nr;
1831 break;
1832 }
1833
1834 p = ps->ps_states;
1835 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1836 RB_FOREACH(state, pf_state_tree_ext_gwy,
1837 &kif->pfik_ext_gwy) {
1838 int secs = time_second;
1839
1840 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1841 break;
1842
1843 bcopy(state, &pstore, sizeof(pstore));
1844 strlcpy(pstore.u.ifname, kif->pfik_name,
1845 sizeof(pstore.u.ifname));
1846 pstore.rule.nr = state->rule.ptr->nr;
1847 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1848 NULL) ? -1 : state->nat_rule.ptr->nr;
1849 pstore.anchor.nr = (state->anchor.ptr ==
1850 NULL) ? -1 : state->anchor.ptr->nr;
1851 pstore.creation = secs - pstore.creation;
1852 pstore.expire = pf_state_expires(state);
1853 if (pstore.expire > secs)
1854 pstore.expire -= secs;
1855 else
1856 pstore.expire = 0;
1857 error = copyout(&pstore, p, sizeof(*p));
1858 if (error)
1859 goto fail;
1860 p++;
1861 nr++;
1862 }
1863 ps->ps_len = sizeof(struct pf_state) * nr;
1864 break;
1865 }
1866
1867 case DIOCGETSTATUS: {
1868 struct pf_status *s = (struct pf_status *)addr;
1869 bcopy(&pf_status, s, sizeof(struct pf_status));
1870 pfi_fill_oldstatus(s);
1871 break;
1872 }
1873
1874 case DIOCSETSTATUSIF: {
1875 struct pfioc_if *pi = (struct pfioc_if *)addr;
1876
1877 if (pi->ifname[0] == 0) {
1878 bzero(pf_status.ifname, IFNAMSIZ);
1879 break;
1880 }
1881 if (ifunit(pi->ifname) == NULL) {
1882 error = EINVAL;
1883 break;
1884 }
1885 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1886 break;
1887 }
1888
1889 case DIOCCLRSTATUS: {
1890 bzero(pf_status.counters, sizeof(pf_status.counters));
1891 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1892 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1893 if (*pf_status.ifname)
1894 pfi_clr_istats(pf_status.ifname, NULL,
1895 PFI_FLAG_INSTANCE);
1896 break;
1897 }
1898
1899 case DIOCNATLOOK: {
1900 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1901 struct pf_state *state;
1902 struct pf_state key;
1903 int m = 0, direction = pnl->direction;
1904
1905 key.af = pnl->af;
1906 key.proto = pnl->proto;
1907
1908 if (!pnl->proto ||
1909 PF_AZERO(&pnl->saddr, pnl->af) ||
1910 PF_AZERO(&pnl->daddr, pnl->af) ||
1911 !pnl->dport || !pnl->sport)
1912 error = EINVAL;
1913 else {
1914 /*
1915 * userland gives us source and dest of connection,
1916 * reverse the lookup so we ask for what happens with
1917 * the return traffic, enabling us to find it in the
1918 * state tree.
1919 */
1920 if (direction == PF_IN) {
1921 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1922 key.ext.port = pnl->dport;
1923 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1924 key.gwy.port = pnl->sport;
1925 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1926 } else {
1927 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1928 key.lan.port = pnl->dport;
1929 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1930 key.ext.port = pnl->sport;
1931 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1932 }
1933 if (m > 1)
1934 error = E2BIG; /* more than one state */
1935 else if (state != NULL) {
1936 if (direction == PF_IN) {
1937 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1938 state->af);
1939 pnl->rsport = state->lan.port;
1940 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1941 pnl->af);
1942 pnl->rdport = pnl->dport;
1943 } else {
1944 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1945 state->af);
1946 pnl->rdport = state->gwy.port;
1947 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1948 pnl->af);
1949 pnl->rsport = pnl->sport;
1950 }
1951 } else
1952 error = ENOENT;
1953 }
1954 break;
1955 }
1956
1957 case DIOCSETTIMEOUT: {
1958 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1959 int old;
1960
1961 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1962 pt->seconds < 0) {
1963 error = EINVAL;
1964 goto fail;
1965 }
1966 old = pf_default_rule.timeout[pt->timeout];
1967 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1968 pt->seconds = old;
1969 break;
1970 }
1971
1972 case DIOCGETTIMEOUT: {
1973 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1974
1975 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1976 error = EINVAL;
1977 goto fail;
1978 }
1979 pt->seconds = pf_default_rule.timeout[pt->timeout];
1980 break;
1981 }
1982
1983 case DIOCGETLIMIT: {
1984 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1985
1986 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1987 error = EINVAL;
1988 goto fail;
1989 }
1990 pl->limit = pf_pool_limits[pl->index].limit;
1991 break;
1992 }
1993
1994 case DIOCSETLIMIT: {
1995 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1996 int old_limit;
1997
1998 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1999 pf_pool_limits[pl->index].pp == NULL) {
2000 error = EINVAL;
2001 goto fail;
2002 }
2003 #ifdef __OpenBSD__
2004 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2005 pl->limit, NULL, 0) != 0) {
2006 error = EBUSY;
2007 goto fail;
2008 }
2009 #else
2010 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2011 pl->limit, NULL, 0);
2012 #endif
2013 old_limit = pf_pool_limits[pl->index].limit;
2014 pf_pool_limits[pl->index].limit = pl->limit;
2015 pl->limit = old_limit;
2016 break;
2017 }
2018
2019 case DIOCSETDEBUG: {
2020 u_int32_t *level = (u_int32_t *)addr;
2021
2022 pf_status.debug = *level;
2023 break;
2024 }
2025
2026 case DIOCCLRRULECTRS: {
2027 struct pf_ruleset *ruleset = &pf_main_ruleset;
2028 struct pf_rule *rule;
2029
2030 TAILQ_FOREACH(rule,
2031 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2032 rule->evaluations = rule->packets =
2033 rule->bytes = 0;
2034 break;
2035 }
2036
2037 #ifdef ALTQ
2038 case DIOCSTARTALTQ: {
2039 struct pf_altq *altq;
2040
2041 /* enable all altq interfaces on active list */
2042 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2043 if (altq->qname[0] == 0) {
2044 error = pf_enable_altq(altq);
2045 if (error != 0)
2046 break;
2047 }
2048 }
2049 if (error == 0)
2050 pf_altq_running = 1;
2051 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2052 break;
2053 }
2054
2055 case DIOCSTOPALTQ: {
2056 struct pf_altq *altq;
2057
2058 /* disable all altq interfaces on active list */
2059 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2060 if (altq->qname[0] == 0) {
2061 error = pf_disable_altq(altq);
2062 if (error != 0)
2063 break;
2064 }
2065 }
2066 if (error == 0)
2067 pf_altq_running = 0;
2068 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2069 break;
2070 }
2071
2072 case DIOCADDALTQ: {
2073 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2074 struct pf_altq *altq, *a;
2075
2076 if (pa->ticket != ticket_altqs_inactive) {
2077 error = EBUSY;
2078 break;
2079 }
2080 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2081 if (altq == NULL) {
2082 error = ENOMEM;
2083 break;
2084 }
2085 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2086
2087 /*
2088 * if this is for a queue, find the discipline and
2089 * copy the necessary fields
2090 */
2091 if (altq->qname[0] != 0) {
2092 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2093 error = EBUSY;
2094 pool_put(&pf_altq_pl, altq);
2095 break;
2096 }
2097 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2098 if (strncmp(a->ifname, altq->ifname,
2099 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2100 altq->altq_disc = a->altq_disc;
2101 break;
2102 }
2103 }
2104 }
2105
2106 error = altq_add(altq);
2107 if (error) {
2108 pool_put(&pf_altq_pl, altq);
2109 break;
2110 }
2111
2112 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2113 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2114 break;
2115 }
2116
2117 case DIOCGETALTQS: {
2118 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2119 struct pf_altq *altq;
2120
2121 pa->nr = 0;
2122 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2123 pa->nr++;
2124 pa->ticket = ticket_altqs_active;
2125 break;
2126 }
2127
2128 case DIOCGETALTQ: {
2129 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2130 struct pf_altq *altq;
2131 u_int32_t nr;
2132
2133 if (pa->ticket != ticket_altqs_active) {
2134 error = EBUSY;
2135 break;
2136 }
2137 nr = 0;
2138 altq = TAILQ_FIRST(pf_altqs_active);
2139 while ((altq != NULL) && (nr < pa->nr)) {
2140 altq = TAILQ_NEXT(altq, entries);
2141 nr++;
2142 }
2143 if (altq == NULL) {
2144 error = EBUSY;
2145 break;
2146 }
2147 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2148 break;
2149 }
2150
2151 case DIOCCHANGEALTQ:
2152 /* CHANGEALTQ not supported yet! */
2153 error = ENODEV;
2154 break;
2155
2156 case DIOCGETQSTATS: {
2157 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2158 struct pf_altq *altq;
2159 u_int32_t nr;
2160 int nbytes;
2161
2162 if (pq->ticket != ticket_altqs_active) {
2163 error = EBUSY;
2164 break;
2165 }
2166 nbytes = pq->nbytes;
2167 nr = 0;
2168 altq = TAILQ_FIRST(pf_altqs_active);
2169 while ((altq != NULL) && (nr < pq->nr)) {
2170 altq = TAILQ_NEXT(altq, entries);
2171 nr++;
2172 }
2173 if (altq == NULL) {
2174 error = EBUSY;
2175 break;
2176 }
2177 error = altq_getqstats(altq, pq->buf, &nbytes);
2178 if (error == 0) {
2179 pq->scheduler = altq->scheduler;
2180 pq->nbytes = nbytes;
2181 }
2182 break;
2183 }
2184 #endif /* ALTQ */
2185
2186 case DIOCBEGINADDRS: {
2187 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2188
2189 pf_empty_pool(&pf_pabuf);
2190 pp->ticket = ++ticket_pabuf;
2191 break;
2192 }
2193
2194 case DIOCADDADDR: {
2195 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2196
2197 #ifndef INET
2198 if (pp->af == AF_INET) {
2199 error = EAFNOSUPPORT;
2200 break;
2201 }
2202 #endif /* INET */
2203 #ifndef INET6
2204 if (pp->af == AF_INET6) {
2205 error = EAFNOSUPPORT;
2206 break;
2207 }
2208 #endif /* INET6 */
2209 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2210 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2211 pp->addr.addr.type != PF_ADDR_TABLE) {
2212 error = EINVAL;
2213 break;
2214 }
2215 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2216 if (pa == NULL) {
2217 error = ENOMEM;
2218 break;
2219 }
2220 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2221 if (pa->ifname[0]) {
2222 pa->kif = pfi_attach_rule(pa->ifname);
2223 if (pa->kif == NULL) {
2224 pool_put(&pf_pooladdr_pl, pa);
2225 error = EINVAL;
2226 break;
2227 }
2228 }
2229 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2230 pfi_dynaddr_remove(&pa->addr);
2231 pfi_detach_rule(pa->kif);
2232 pool_put(&pf_pooladdr_pl, pa);
2233 error = EINVAL;
2234 break;
2235 }
2236 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2237 break;
2238 }
2239
2240 case DIOCGETADDRS: {
2241 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2242
2243 pp->nr = 0;
2244 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2245 pp->r_num, 0, 1, 0);
2246 if (pool == NULL) {
2247 error = EBUSY;
2248 break;
2249 }
2250 TAILQ_FOREACH(pa, &pool->list, entries)
2251 pp->nr++;
2252 break;
2253 }
2254
2255 case DIOCGETADDR: {
2256 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2257 u_int32_t nr = 0;
2258
2259 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2260 pp->r_num, 0, 1, 1);
2261 if (pool == NULL) {
2262 error = EBUSY;
2263 break;
2264 }
2265 pa = TAILQ_FIRST(&pool->list);
2266 while ((pa != NULL) && (nr < pp->nr)) {
2267 pa = TAILQ_NEXT(pa, entries);
2268 nr++;
2269 }
2270 if (pa == NULL) {
2271 error = EBUSY;
2272 break;
2273 }
2274 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2275 pfi_dynaddr_copyout(&pp->addr.addr);
2276 pf_tbladdr_copyout(&pp->addr.addr);
2277 pf_rtlabel_copyout(&pp->addr.addr);
2278 break;
2279 }
2280
2281 case DIOCCHANGEADDR: {
2282 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2283 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2284 struct pf_ruleset *ruleset;
2285
2286 if (pca->action < PF_CHANGE_ADD_HEAD ||
2287 pca->action > PF_CHANGE_REMOVE) {
2288 error = EINVAL;
2289 break;
2290 }
2291 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2292 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2293 pca->addr.addr.type != PF_ADDR_TABLE) {
2294 error = EINVAL;
2295 break;
2296 }
2297
2298 ruleset = pf_find_ruleset(pca->anchor);
2299 if (ruleset == NULL) {
2300 error = EBUSY;
2301 break;
2302 }
2303 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2304 pca->r_num, pca->r_last, 1, 1);
2305 if (pool == NULL) {
2306 error = EBUSY;
2307 break;
2308 }
2309 if (pca->action != PF_CHANGE_REMOVE) {
2310 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2311 if (newpa == NULL) {
2312 error = ENOMEM;
2313 break;
2314 }
2315 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2316 #ifndef INET
2317 if (pca->af == AF_INET) {
2318 pool_put(&pf_pooladdr_pl, newpa);
2319 error = EAFNOSUPPORT;
2320 break;
2321 }
2322 #endif /* INET */
2323 #ifndef INET6
2324 if (pca->af == AF_INET6) {
2325 pool_put(&pf_pooladdr_pl, newpa);
2326 error = EAFNOSUPPORT;
2327 break;
2328 }
2329 #endif /* INET6 */
2330 if (newpa->ifname[0]) {
2331 newpa->kif = pfi_attach_rule(newpa->ifname);
2332 if (newpa->kif == NULL) {
2333 pool_put(&pf_pooladdr_pl, newpa);
2334 error = EINVAL;
2335 break;
2336 }
2337 } else
2338 newpa->kif = NULL;
2339 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2340 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2341 pfi_dynaddr_remove(&newpa->addr);
2342 pfi_detach_rule(newpa->kif);
2343 pool_put(&pf_pooladdr_pl, newpa);
2344 error = EINVAL;
2345 break;
2346 }
2347 }
2348
2349 if (pca->action == PF_CHANGE_ADD_HEAD)
2350 oldpa = TAILQ_FIRST(&pool->list);
2351 else if (pca->action == PF_CHANGE_ADD_TAIL)
2352 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2353 else {
2354 int i = 0;
2355
2356 oldpa = TAILQ_FIRST(&pool->list);
2357 while ((oldpa != NULL) && (i < pca->nr)) {
2358 oldpa = TAILQ_NEXT(oldpa, entries);
2359 i++;
2360 }
2361 if (oldpa == NULL) {
2362 error = EINVAL;
2363 break;
2364 }
2365 }
2366
2367 if (pca->action == PF_CHANGE_REMOVE) {
2368 TAILQ_REMOVE(&pool->list, oldpa, entries);
2369 pfi_dynaddr_remove(&oldpa->addr);
2370 pf_tbladdr_remove(&oldpa->addr);
2371 pfi_detach_rule(oldpa->kif);
2372 pool_put(&pf_pooladdr_pl, oldpa);
2373 } else {
2374 if (oldpa == NULL)
2375 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2376 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2377 pca->action == PF_CHANGE_ADD_BEFORE)
2378 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2379 else
2380 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2381 newpa, entries);
2382 }
2383
2384 pool->cur = TAILQ_FIRST(&pool->list);
2385 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2386 pca->af);
2387 break;
2388 }
2389
2390 case DIOCGETRULESETS: {
2391 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2392 struct pf_ruleset *ruleset;
2393 struct pf_anchor *anchor;
2394
2395 pr->path[sizeof(pr->path) - 1] = 0;
2396 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2397 error = EINVAL;
2398 break;
2399 }
2400 pr->nr = 0;
2401 if (ruleset->anchor == NULL) {
2402 /* XXX kludge for pf_main_ruleset */
2403 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2404 if (anchor->parent == NULL)
2405 pr->nr++;
2406 } else {
2407 RB_FOREACH(anchor, pf_anchor_node,
2408 &ruleset->anchor->children)
2409 pr->nr++;
2410 }
2411 break;
2412 }
2413
2414 case DIOCGETRULESET: {
2415 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2416 struct pf_ruleset *ruleset;
2417 struct pf_anchor *anchor;
2418 u_int32_t nr = 0;
2419
2420 pr->path[sizeof(pr->path) - 1] = 0;
2421 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2422 error = EINVAL;
2423 break;
2424 }
2425 pr->name[0] = 0;
2426 if (ruleset->anchor == NULL) {
2427 /* XXX kludge for pf_main_ruleset */
2428 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2429 if (anchor->parent == NULL && nr++ == pr->nr) {
2430 strlcpy(pr->name, anchor->name,
2431 sizeof(pr->name));
2432 break;
2433 }
2434 } else {
2435 RB_FOREACH(anchor, pf_anchor_node,
2436 &ruleset->anchor->children)
2437 if (nr++ == pr->nr) {
2438 strlcpy(pr->name, anchor->name,
2439 sizeof(pr->name));
2440 break;
2441 }
2442 }
2443 if (!pr->name[0])
2444 error = EBUSY;
2445 break;
2446 }
2447
2448 case DIOCRCLRTABLES: {
2449 struct pfioc_table *io = (struct pfioc_table *)addr;
2450
2451 if (io->pfrio_esize != 0) {
2452 error = ENODEV;
2453 break;
2454 }
2455 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2456 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2457 break;
2458 }
2459
2460 case DIOCRADDTABLES: {
2461 struct pfioc_table *io = (struct pfioc_table *)addr;
2462
2463 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2464 error = ENODEV;
2465 break;
2466 }
2467 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2468 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2469 break;
2470 }
2471
2472 case DIOCRDELTABLES: {
2473 struct pfioc_table *io = (struct pfioc_table *)addr;
2474
2475 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2476 error = ENODEV;
2477 break;
2478 }
2479 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2480 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2481 break;
2482 }
2483
2484 case DIOCRGETTABLES: {
2485 struct pfioc_table *io = (struct pfioc_table *)addr;
2486
2487 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2488 error = ENODEV;
2489 break;
2490 }
2491 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2492 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2493 break;
2494 }
2495
2496 case DIOCRGETTSTATS: {
2497 struct pfioc_table *io = (struct pfioc_table *)addr;
2498
2499 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2500 error = ENODEV;
2501 break;
2502 }
2503 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2504 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2505 break;
2506 }
2507
2508 case DIOCRCLRTSTATS: {
2509 struct pfioc_table *io = (struct pfioc_table *)addr;
2510
2511 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2512 error = ENODEV;
2513 break;
2514 }
2515 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2516 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2517 break;
2518 }
2519
2520 case DIOCRSETTFLAGS: {
2521 struct pfioc_table *io = (struct pfioc_table *)addr;
2522
2523 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2524 error = ENODEV;
2525 break;
2526 }
2527 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2528 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2529 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2530 break;
2531 }
2532
2533 case DIOCRCLRADDRS: {
2534 struct pfioc_table *io = (struct pfioc_table *)addr;
2535
2536 if (io->pfrio_esize != 0) {
2537 error = ENODEV;
2538 break;
2539 }
2540 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2541 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2542 break;
2543 }
2544
2545 case DIOCRADDADDRS: {
2546 struct pfioc_table *io = (struct pfioc_table *)addr;
2547
2548 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2549 error = ENODEV;
2550 break;
2551 }
2552 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2553 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2554 PFR_FLAG_USERIOCTL);
2555 break;
2556 }
2557
2558 case DIOCRDELADDRS: {
2559 struct pfioc_table *io = (struct pfioc_table *)addr;
2560
2561 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2562 error = ENODEV;
2563 break;
2564 }
2565 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2566 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2567 PFR_FLAG_USERIOCTL);
2568 break;
2569 }
2570
2571 case DIOCRSETADDRS: {
2572 struct pfioc_table *io = (struct pfioc_table *)addr;
2573
2574 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2575 error = ENODEV;
2576 break;
2577 }
2578 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2579 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2580 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2581 PFR_FLAG_USERIOCTL);
2582 break;
2583 }
2584
2585 case DIOCRGETADDRS: {
2586 struct pfioc_table *io = (struct pfioc_table *)addr;
2587
2588 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2589 error = ENODEV;
2590 break;
2591 }
2592 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2593 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2594 break;
2595 }
2596
2597 case DIOCRGETASTATS: {
2598 struct pfioc_table *io = (struct pfioc_table *)addr;
2599
2600 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2601 error = ENODEV;
2602 break;
2603 }
2604 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2605 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2606 break;
2607 }
2608
2609 case DIOCRCLRASTATS: {
2610 struct pfioc_table *io = (struct pfioc_table *)addr;
2611
2612 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2613 error = ENODEV;
2614 break;
2615 }
2616 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2617 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2618 PFR_FLAG_USERIOCTL);
2619 break;
2620 }
2621
2622 case DIOCRTSTADDRS: {
2623 struct pfioc_table *io = (struct pfioc_table *)addr;
2624
2625 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2626 error = ENODEV;
2627 break;
2628 }
2629 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2630 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2631 PFR_FLAG_USERIOCTL);
2632 break;
2633 }
2634
2635 case DIOCRINADEFINE: {
2636 struct pfioc_table *io = (struct pfioc_table *)addr;
2637
2638 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2639 error = ENODEV;
2640 break;
2641 }
2642 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2643 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2644 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2645 break;
2646 }
2647
2648 case DIOCOSFPADD: {
2649 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2650 error = pf_osfp_add(io);
2651 break;
2652 }
2653
2654 case DIOCOSFPGET: {
2655 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2656 error = pf_osfp_get(io);
2657 break;
2658 }
2659
2660 case DIOCXBEGIN: {
2661 struct pfioc_trans *io = (struct pfioc_trans *)
2662 addr;
2663 static struct pfioc_trans_e ioe;
2664 static struct pfr_table table;
2665 int i;
2666
2667 if (io->esize != sizeof(ioe)) {
2668 error = ENODEV;
2669 goto fail;
2670 }
2671 for (i = 0; i < io->size; i++) {
2672 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2673 error = EFAULT;
2674 goto fail;
2675 }
2676 switch (ioe.rs_num) {
2677 #ifdef ALTQ
2678 case PF_RULESET_ALTQ:
2679 if (ioe.anchor[0]) {
2680 error = EINVAL;
2681 goto fail;
2682 }
2683 if ((error = pf_begin_altq(&ioe.ticket)))
2684 goto fail;
2685 break;
2686 #endif /* ALTQ */
2687 case PF_RULESET_TABLE:
2688 bzero(&table, sizeof(table));
2689 strlcpy(table.pfrt_anchor, ioe.anchor,
2690 sizeof(table.pfrt_anchor));
2691 if ((error = pfr_ina_begin(&table,
2692 &ioe.ticket, NULL, 0)))
2693 goto fail;
2694 break;
2695 default:
2696 if ((error = pf_begin_rules(&ioe.ticket,
2697 ioe.rs_num, ioe.anchor)))
2698 goto fail;
2699 break;
2700 }
2701 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2702 error = EFAULT;
2703 goto fail;
2704 }
2705 }
2706 break;
2707 }
2708
2709 case DIOCXROLLBACK: {
2710 struct pfioc_trans *io = (struct pfioc_trans *)
2711 addr;
2712 static struct pfioc_trans_e ioe;
2713 static struct pfr_table table;
2714 int i;
2715
2716 if (io->esize != sizeof(ioe)) {
2717 error = ENODEV;
2718 goto fail;
2719 }
2720 for (i = 0; i < io->size; i++) {
2721 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2722 error = EFAULT;
2723 goto fail;
2724 }
2725 switch (ioe.rs_num) {
2726 #ifdef ALTQ
2727 case PF_RULESET_ALTQ:
2728 if (ioe.anchor[0]) {
2729 error = EINVAL;
2730 goto fail;
2731 }
2732 if ((error = pf_rollback_altq(ioe.ticket)))
2733 goto fail; /* really bad */
2734 break;
2735 #endif /* ALTQ */
2736 case PF_RULESET_TABLE:
2737 bzero(&table, sizeof(table));
2738 strlcpy(table.pfrt_anchor, ioe.anchor,
2739 sizeof(table.pfrt_anchor));
2740 if ((error = pfr_ina_rollback(&table,
2741 ioe.ticket, NULL, 0)))
2742 goto fail; /* really bad */
2743 break;
2744 default:
2745 if ((error = pf_rollback_rules(ioe.ticket,
2746 ioe.rs_num, ioe.anchor)))
2747 goto fail; /* really bad */
2748 break;
2749 }
2750 }
2751 break;
2752 }
2753
2754 case DIOCXCOMMIT: {
2755 struct pfioc_trans *io = (struct pfioc_trans *)
2756 addr;
2757 static struct pfioc_trans_e ioe;
2758 static struct pfr_table table;
2759 struct pf_ruleset *rs;
2760 int i;
2761
2762 if (io->esize != sizeof(ioe)) {
2763 error = ENODEV;
2764 goto fail;
2765 }
2766 /* first makes sure everything will succeed */
2767 for (i = 0; i < io->size; i++) {
2768 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2769 error = EFAULT;
2770 goto fail;
2771 }
2772 switch (ioe.rs_num) {
2773 #ifdef ALTQ
2774 case PF_RULESET_ALTQ:
2775 if (ioe.anchor[0]) {
2776 error = EINVAL;
2777 goto fail;
2778 }
2779 if (!altqs_inactive_open || ioe.ticket !=
2780 ticket_altqs_inactive) {
2781 error = EBUSY;
2782 goto fail;
2783 }
2784 break;
2785 #endif /* ALTQ */
2786 case PF_RULESET_TABLE:
2787 rs = pf_find_ruleset(ioe.anchor);
2788 if (rs == NULL || !rs->topen || ioe.ticket !=
2789 rs->tticket) {
2790 error = EBUSY;
2791 goto fail;
2792 }
2793 break;
2794 default:
2795 if (ioe.rs_num < 0 || ioe.rs_num >=
2796 PF_RULESET_MAX) {
2797 error = EINVAL;
2798 goto fail;
2799 }
2800 rs = pf_find_ruleset(ioe.anchor);
2801 if (rs == NULL ||
2802 !rs->rules[ioe.rs_num].inactive.open ||
2803 rs->rules[ioe.rs_num].inactive.ticket !=
2804 ioe.ticket) {
2805 error = EBUSY;
2806 goto fail;
2807 }
2808 break;
2809 }
2810 }
2811 /* now do the commit - no errors should happen here */
2812 for (i = 0; i < io->size; i++) {
2813 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2814 error = EFAULT;
2815 goto fail;
2816 }
2817 switch (ioe.rs_num) {
2818 #ifdef ALTQ
2819 case PF_RULESET_ALTQ:
2820 if ((error = pf_commit_altq(ioe.ticket)))
2821 goto fail; /* really bad */
2822 break;
2823 #endif /* ALTQ */
2824 case PF_RULESET_TABLE:
2825 bzero(&table, sizeof(table));
2826 strlcpy(table.pfrt_anchor, ioe.anchor,
2827 sizeof(table.pfrt_anchor));
2828 if ((error = pfr_ina_commit(&table, ioe.ticket,
2829 NULL, NULL, 0)))
2830 goto fail; /* really bad */
2831 break;
2832 default:
2833 if ((error = pf_commit_rules(ioe.ticket,
2834 ioe.rs_num, ioe.anchor)))
2835 goto fail; /* really bad */
2836 break;
2837 }
2838 }
2839 break;
2840 }
2841
2842 case DIOCGETSRCNODES: {
2843 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2844 struct pf_src_node *n;
2845 struct pf_src_node *p, pstore;
2846 u_int32_t nr = 0;
2847 int space = psn->psn_len;
2848
2849 if (space == 0) {
2850 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2851 nr++;
2852 psn->psn_len = sizeof(struct pf_src_node) * nr;
2853 break;
2854 }
2855
2856 p = psn->psn_src_nodes;
2857 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2858 int secs = time_second, diff;
2859
2860 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2861 break;
2862
2863 bcopy(n, &pstore, sizeof(pstore));
2864 if (n->rule.ptr != NULL)
2865 pstore.rule.nr = n->rule.ptr->nr;
2866 pstore.creation = secs - pstore.creation;
2867 if (pstore.expire > secs)
2868 pstore.expire -= secs;
2869 else
2870 pstore.expire = 0;
2871
2872 /* adjust the connection rate estimate */
2873 diff = secs - n->conn_rate.last;
2874 if (diff >= n->conn_rate.seconds)
2875 pstore.conn_rate.count = 0;
2876 else
2877 pstore.conn_rate.count -=
2878 n->conn_rate.count * diff /
2879 n->conn_rate.seconds;
2880
2881 error = copyout(&pstore, p, sizeof(*p));
2882 if (error)
2883 goto fail;
2884 p++;
2885 nr++;
2886 }
2887 psn->psn_len = sizeof(struct pf_src_node) * nr;
2888 break;
2889 }
2890
2891 case DIOCCLRSRCNODES: {
2892 struct pf_src_node *n;
2893 struct pf_state *state;
2894
2895 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2896 state->src_node = NULL;
2897 state->nat_src_node = NULL;
2898 }
2899 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2900 n->expire = 1;
2901 n->states = 0;
2902 }
2903 pf_purge_expired_src_nodes();
2904 pf_status.src_nodes = 0;
2905 break;
2906 }
2907
2908 case DIOCSETHOSTID: {
2909 u_int32_t *hostid = (u_int32_t *)addr;
2910
2911 if (*hostid == 0)
2912 pf_status.hostid = arc4random();
2913 else
2914 pf_status.hostid = *hostid;
2915 break;
2916 }
2917
2918 case DIOCOSFPFLUSH:
2919 pf_osfp_flush();
2920 break;
2921
2922 case DIOCIGETIFACES: {
2923 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2924
2925 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2926 error = ENODEV;
2927 break;
2928 }
2929 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2930 &io->pfiio_size, io->pfiio_flags);
2931 break;
2932 }
2933
2934 case DIOCICLRISTATS: {
2935 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2936
2937 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2938 io->pfiio_flags);
2939 break;
2940 }
2941
2942 case DIOCSETIFFLAG: {
2943 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2944
2945 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2946 break;
2947 }
2948
2949 case DIOCCLRIFFLAG: {
2950 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2951
2952 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2953 break;
2954 }
2955
2956 default:
2957 error = ENODEV;
2958 break;
2959 }
2960 fail:
2961 splx(s);
2962 return (error);
2963 }
2964
2965 #ifdef __NetBSD__
2966 #ifdef INET
2967 int
2968 pfil4_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp, int dir)
2969 {
2970 int error;
2971
2972 /*
2973 * ensure that mbufs are writable beforehand
2974 * as it's assumed by pf code.
2975 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2976 * XXX inefficient
2977 */
2978 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2979 if (error) {
2980 m_freem(*mp);
2981 *mp = NULL;
2982 return error;
2983 }
2984
2985 /*
2986 * If the packet is out-bound, we can't delay checksums
2987 * here. For in-bound, the checksum has already been
2988 * validated.
2989 */
2990 if (dir == PFIL_OUT) {
2991 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2992 in_delayed_cksum(*mp);
2993 (*mp)->m_pkthdr.csum_flags &=
2994 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2995 }
2996 }
2997
2998 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2999 != PF_PASS) {
3000 m_freem(*mp);
3001 *mp = NULL;
3002 return EHOSTUNREACH;
3003 }
3004
3005 /*
3006 * we're not compatible with fast-forward.
3007 */
3008
3009 if (dir == PFIL_IN && *mp) {
3010 (*mp)->m_flags &= ~M_CANFASTFWD;
3011 }
3012
3013 return (0);
3014 }
3015 #endif /* INET */
3016
3017 #ifdef INET6
3018 int
3019 pfil6_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp, int dir)
3020 {
3021 int error;
3022
3023 /*
3024 * ensure that mbufs are writable beforehand
3025 * as it's assumed by pf code.
3026 * XXX inefficient
3027 */
3028 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3029 if (error) {
3030 m_freem(*mp);
3031 *mp = NULL;
3032 return error;
3033 }
3034
3035 /*
3036 * If the packet is out-bound, we can't delay checksums
3037 * here. For in-bound, the checksum has already been
3038 * validated.
3039 */
3040 if (dir == PFIL_OUT) {
3041 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3042 in6_delayed_cksum(*mp);
3043 (*mp)->m_pkthdr.csum_flags &=
3044 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3045 }
3046 }
3047
3048 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3049 != PF_PASS) {
3050 m_freem(*mp);
3051 *mp = NULL;
3052 return EHOSTUNREACH;
3053 } else
3054 return (0);
3055 }
3056 #endif
3057
3058 int
3059 pfil_ifnet_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp,
3060 int dir __unused)
3061 {
3062 u_long cmd = (u_long)mp;
3063
3064 switch (cmd) {
3065 case PFIL_IFNET_ATTACH:
3066 pfi_attach_ifnet(ifp);
3067 break;
3068 case PFIL_IFNET_DETACH:
3069 pfi_detach_ifnet(ifp);
3070 break;
3071 }
3072
3073 return (0);
3074 }
3075
/*
 * PFIL_IFADDR hook: called when an interface address changes so pf can
 * refresh its per-interface state.  The "mbuf" pointer carries the ioctl
 * command, not packet data.
 */
int
pfil_ifaddr_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp,
    int dir __unused)
{
	/* No visible prototype for this pfi helper; declare it locally. */
	extern void pfi_kifaddr_update_if(struct ifnet *);

	u_long cmd = (u_long)mp;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif
		/* An address on ifp changed; update pf's view of it. */
		pfi_kifaddr_update_if(ifp);
		break;
	default:
		/* Only the address ioctls above should reach this hook. */
		panic("unexpected ioctl");
	}

	return (0);
}
3100
/*
 * Hook pf into the pfil(9) framework: interface attach/detach events,
 * interface address changes, and the IPv4 (and, with INET6, IPv6)
 * packet paths.  On any failure, hooks installed so far are removed
 * again via the goto-unwind labels below.  Idempotent once attached.
 * Returns 0 on success or an errno value.
 */
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	/* Already attached: nothing to do. */
	if (pf_pfil_attached)
		return (0);

	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	/*
	 * NOTE(review): pfil4_wrapper is compiled only #ifdef INET, yet
	 * this reference is unconditional -- presumably INET is always
	 * defined in supported configs; confirm before building !INET.
	 */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	/* Register every interface that already exists with pf. */
	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

	/* Unwind: each label undoes the hooks added before the failure. */
#ifdef INET6
bad4:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}
3159
/*
 * Undo pf_pfil_attach(): detach pf state from all interfaces and remove
 * every pfil(9) hook.  Idempotent when not attached.  Always returns 0.
 */
static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	/* Not attached: nothing to do. */
	if (pf_pfil_attached == 0)
		return (0);

	/*
	 * NOTE(review): guarded by pfi_index2kif[i] but detaches
	 * ifindex2ifnet[i] -- assumes the two arrays stay in sync so the
	 * ifnet slot is non-NULL whenever the kif slot is; confirm.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
3193 #endif
3194