/*	$NetBSD: pf_ioctl.c,v 1.21.12.2 2006/09/25 04:03:10 peter Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define NPFSYNC 0
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #include <sys/lwp.h>
70 #include <sys/kauth.h>
71 #endif
72
73 #include <net/if.h>
74 #include <net/if_types.h>
75 #include <net/route.h>
76
77 #include <netinet/in.h>
78 #include <netinet/in_var.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/ip.h>
81 #include <netinet/ip_var.h>
82 #include <netinet/ip_icmp.h>
83
84 #ifdef __OpenBSD__
85 #include <dev/rndvar.h>
86 #endif
87 #include <net/pfvar.h>
88
89 #if NPFSYNC > 0
90 #include <net/if_pfsync.h>
91 #endif /* NPFSYNC > 0 */
92
93 #ifdef INET6
94 #include <netinet/ip6.h>
95 #include <netinet/in_pcb.h>
96 #endif /* INET6 */
97
98 #ifdef ALTQ
99 #include <altq/altq.h>
100 #endif
101
/*
 * Entry points for the pseudo-device and autoconf glue.
 */
void	pfattach(int);
#ifdef _LKM
void	pfdetach(void);
#endif
int	pfopen(dev_t, int, int, struct lwp *);
int	pfclose(dev_t, int, int, struct lwp *);

/* Internal helpers for rule, anchor and address-pool management. */
struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
	    u_int8_t, u_int8_t, u_int8_t);
int	pf_get_ruleset_number(u_int8_t);
void	pf_init_ruleset(struct pf_ruleset *);
int	pf_anchor_setup(struct pf_rule *,
	    const struct pf_ruleset *, const char *);
int	pf_anchor_copyout(const struct pf_ruleset *,
	    const struct pf_rule *, struct pfioc_rule *);
void	pf_anchor_remove(struct pf_rule *);

void	pf_mv_pool(struct pf_palist *, struct pf_palist *);
void	pf_empty_pool(struct pf_palist *);
int	pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
#ifdef ALTQ
int	pf_begin_altq(u_int32_t *);
int	pf_rollback_altq(u_int32_t);
int	pf_commit_altq(u_int32_t);
int	pf_enable_altq(struct pf_altq *);
int	pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int	pf_begin_rules(u_int32_t *, int, const char *);
int	pf_rollback_rules(u_int32_t, int, char *);
int	pf_commit_rules(u_int32_t, int, char *);

#ifdef __NetBSD__
/* Character device switch for /dev/pf; only open/close/ioctl exist. */
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

/* Nonzero while pf's pfil(9) hooks are installed. */
static int pf_pfil_attached = 0;
#endif

/* State-expiry timer; armed in pfattach(). */
#ifdef __OpenBSD__
extern struct timeout	pf_expire_to;
#else
extern struct callout	pf_expire_to;
#endif

/* The default rule (action PF_PASS, set up in pfattach()). */
struct pf_rule		 pf_default_rule;
#ifdef ALTQ
static int		 pf_altq_running;
#endif

/*
 * Tags and ALTQ queue IDs share one allocator (tagname2tag());
 * IDs run from 1 to TAGID_MAX in each namespace.
 */
#define TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf, gated on the configured pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
extern struct pfil_head	if_pfil;
#endif
174
/*
 * pfattach: one-shot initialization of the pf subsystem, called at
 * pseudo-device attach time.  Creates the backing memory pools,
 * initializes the table/interface/OS-fingerprint/normalization
 * sub-modules, installs the default rule and its timeouts, and arms
 * the state purge timer.  `num' (the autoconf unit count) is unused.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* Backing pools for pf's main object types. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* Cap the number of states at the configured limit. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* Arm the periodic purge timer (fires every PFTM_INTERVAL secs). */
#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
248
#ifdef _LKM
/*
 * pfdetach: full teardown for LKM unload.  Undoes everything pfattach()
 * set up: unhooks pfil, stops the purge timer, flushes all rulesets,
 * states, source nodes, tables and anchors, then destroys the pools and
 * sub-modules.
 */
void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_status.running = 0;

	/* clear the rulesets: commit an empty transaction per slot */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif

	/* clear states: mark every state for immediate purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/*
	 * clear source nodes
	 * NOTE(review): this walk over tree_id runs after
	 * pf_purge_expired_states() above, so the tree is likely already
	 * empty at this point -- confirm the intended ordering.
	 */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: flushing their rulesets reaps empty anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
326
/*
 * Open /dev/pf.  Only minor device 0 exists; reject anything else.
 */
int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	return (minor(dev) >= 1 ? ENXIO : 0);
}
334
/*
 * Close /dev/pf.  Mirrors pfopen(): only minor device 0 is valid.
 */
int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	return (minor(dev) >= 1 ? ENXIO : 0);
}
342
/*
 * Look up the address pool of a single rule.
 *
 *	anchor		path of the ruleset to search
 *	ticket		transaction ticket (validated iff check_ticket)
 *	rule_action	selects the ruleset slot (filter/nat/rdr/...)
 *	rule_number	rule number to match (ignored when r_last is set)
 *	r_last		take the last rule in the queue instead
 *	active		search the active (vs. inactive) rule queue
 *	check_ticket	require `ticket' to match the queue's ticket
 *
 * Returns a pointer to the matching rule's rpool, or NULL.
 */
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	/* Scan forward for the requested rule number. */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
386
387 int
388 pf_get_ruleset_number(u_int8_t action)
389 {
390 switch (action) {
391 case PF_SCRUB:
392 case PF_NOSCRUB:
393 return (PF_RULESET_SCRUB);
394 break;
395 case PF_PASS:
396 case PF_DROP:
397 return (PF_RULESET_FILTER);
398 break;
399 case PF_NAT:
400 case PF_NONAT:
401 return (PF_RULESET_NAT);
402 break;
403 case PF_BINAT:
404 case PF_NOBINAT:
405 return (PF_RULESET_BINAT);
406 break;
407 case PF_RDR:
408 case PF_NORDR:
409 return (PF_RULESET_RDR);
410 break;
411 default:
412 return (PF_RULESET_MAX);
413 break;
414 }
415 }
416
417 void
418 pf_init_ruleset(struct pf_ruleset *ruleset)
419 {
420 int i;
421
422 memset(ruleset, 0, sizeof(struct pf_ruleset));
423 for (i = 0; i < PF_RULESET_MAX; i++) {
424 TAILQ_INIT(&ruleset->rules[i].queues[0]);
425 TAILQ_INIT(&ruleset->rules[i].queues[1]);
426 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
427 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
428 }
429 }
430
/*
 * Find an anchor by its full path in the global anchor tree.
 * The lookup key is static (struct pf_anchor is large), which makes
 * this non-reentrant.  NOTE(review): presumably serialized by the
 * caller's splsoftnet section -- confirm.
 */
struct pf_anchor *
pf_find_anchor(const char *path)
{
	static struct pf_anchor	 key;

	memset(&key, 0, sizeof(key));
	strlcpy(key.path, path, sizeof(key.path));
	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
}
440
441 struct pf_ruleset *
442 pf_find_ruleset(const char *path)
443 {
444 struct pf_anchor *anchor;
445
446 while (*path == '/')
447 path++;
448 if (!*path)
449 return (&pf_main_ruleset);
450 anchor = pf_find_anchor(path);
451 if (anchor == NULL)
452 return (NULL);
453 else
454 return (&anchor->ruleset);
455 }
456
/*
 * Return the ruleset for `path', creating any missing anchors along
 * the way.  Returns NULL on allocation failure, name collision, or if
 * a path component is empty or too long.  The static buffer `p' makes
 * this non-reentrant, like pf_find_anchor().
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL /* XXX gcc */, *r;
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL /* XXX gcc */,
				*dup, *parent = NULL;

	while (*path == '/')
		path++;
	/* Fast path: the whole ruleset already exists. */
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	strlcpy(p, path, sizeof(p));
	/* Strip trailing components until we find an existing ancestor. */
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now marks the first path component that must be created. */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* Create one anchor per remaining component, linking parents. */
	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL)
			*r = 0;
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		/* Full path = parent's path + "/" + component name. */
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			/* Also index the anchor in its parent's subtree. */
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;
		/* Advance to the next component, if any remain. */
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;
	}
	return (&anchor->ruleset);
}
538
/*
 * Garbage-collect a ruleset, then walk up and collect each ancestor
 * that became empty in turn.  A ruleset is kept if it is the main
 * ruleset, still referenced, has children, tables, an open table
 * transaction, any rules, or an open rule transaction.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* Unlink the anchor from both trees and free it. */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		/* Continue with the parent, which may now be empty too. */
		ruleset = &parent->ruleset;
	}
}
566
/*
 * Resolve the anchor call `name' of rule `r' defined in ruleset `s':
 * absolute ("/..."), relative (possibly with "../" components), or
 * wildcard (trailing "/*").  Creates the target ruleset if needed and
 * takes a reference on its anchor.  Returns 0 on success, 1 on error.
 */
int
pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
    const char *name)
{
	static char		*p, path[MAXPATHLEN];
	struct pf_ruleset	*ruleset;

	r->anchor = NULL;
	r->anchor_relative = 0;
	r->anchor_wildcard = 0;
	if (!name[0])
		return (0);
	if (name[0] == '/')
		strlcpy(path, name + 1, sizeof(path));
	else {
		/* relative path: start from the calling ruleset's path */
		r->anchor_relative = 1;
		if (s->anchor == NULL || !s->anchor->path[0])
			path[0] = 0;
		else
			strlcpy(path, s->anchor->path, sizeof(path));
		/* each "../" strips one component, counting the depth */
		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
			if (!path[0]) {
				printf("pf_anchor_setup: .. beyond root\n");
				return (1);
			}
			if ((p = strrchr(path, '/')) != NULL)
				*p = 0;
			else
				path[0] = 0;
			r->anchor_relative++;
			name += 3;
		}
		if (path[0])
			strlcat(path, "/", sizeof(path));
		strlcat(path, name, sizeof(path));
	}
	/* a trailing "/*" means "all children of this anchor" */
	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
		r->anchor_wildcard = 1;
		*p = 0;
	}
	ruleset = pf_find_or_create_ruleset(path);
	if (ruleset == NULL || ruleset->anchor == NULL) {
		printf("pf_anchor_setup: ruleset\n");
		return (1);
	}
	r->anchor = ruleset->anchor;
	r->anchor->refcnt++;
	return (0);
}
617
/*
 * Reconstruct the textual anchor call of rule `r' (as it appeared in
 * the config, i.e. the inverse of pf_anchor_setup()) into
 * pr->anchor_call for export to userland.  `rs' is the ruleset the
 * rule lives in.  Returns 0 on success, 1 on an inconsistent path.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		/* absolute call: "/" + full anchor path */
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char	 a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int	 i;

		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* emit one "../" per level the call climbed */
		for (i = 1; i < r->anchor_relative; ++i) {
			if ((p = strrchr(a, '/')) == NULL)
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* the target path must lie under the climbed-to prefix */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
658
659 void
660 pf_anchor_remove(struct pf_rule *r)
661 {
662 if (r->anchor == NULL)
663 return;
664 if (r->anchor->refcnt <= 0) {
665 printf("pf_anchor_remove: broken refcount");
666 r->anchor = NULL;
667 return;
668 }
669 if (!--r->anchor->refcnt)
670 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
671 r->anchor = NULL;
672 }
673
674 void
675 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
676 {
677 struct pf_pooladdr *mv_pool_pa;
678
679 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
680 TAILQ_REMOVE(poola, mv_pool_pa, entries);
681 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
682 }
683 }
684
/*
 * Release every pool address in `poola': detach its dynamic-address,
 * table and interface bindings, unlink it, and return it to the
 * pooladdr pool.
 */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_detach_rule(empty_pool_pa->kif);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
698
/*
 * Remove `rule' from `rulequeue' (if non-NULL) and free it once no
 * states or source nodes reference it.  A rule still referenced is
 * only unlinked here; the final free happens on a later call with
 * rulequeue == NULL, which is why the table detach appears twice.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as unlinked (checked below). */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Defer the free while anything still references the rule. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Deferred free: tables were not detached above. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_detach_rule(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
744
/*
 * Return the numeric tag for `tagname' in list `head', allocating a
 * new id if the name is unknown.  The list is kept sorted by id;
 * 0 is returned on allocation failure or id exhaustion (> TAGID_MAX).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	/* Existing name: bump its refcount and reuse the id. */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	/* p (if set) is the first entry with an id above the gap we fill. */
	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
789
/*
 * Copy the name of `tagid' into `p' (at least PF_TAG_NAME_SIZE bytes).
 * NOTE(review): if the id is not found, `p' is left untouched --
 * callers appear to rely on pre-zeroed buffers; confirm.
 */
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}
801
802 static void
803 tag_unref(struct pf_tags *head, u_int16_t tag)
804 {
805 struct pf_tagname *p, *next;
806
807 if (tag == 0)
808 return;
809
810 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
811 next = TAILQ_NEXT(p, entries);
812 if (tag == p->tag) {
813 if (--p->ref == 0) {
814 TAILQ_REMOVE(head, p, entries);
815 free(p, M_TEMP);
816 }
817 break;
818 }
819 }
820 }
821
/* Public wrapper: allocate/look up a tag id in the pf_tags namespace. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
827
828 void
829 pf_tag2tagname(u_int16_t tagid, char *p)
830 {
831 return (tag2tagname(&pf_tags, tagid, p));
832 }
833
834 void
835 pf_tag_ref(u_int16_t tag)
836 {
837 struct pf_tagname *t;
838
839 TAILQ_FOREACH(t, &pf_tags, entries)
840 if (t->tag == tag)
841 break;
842 if (t != NULL)
843 t->ref++;
844 }
845
846 void
847 pf_tag_unref(u_int16_t tag)
848 {
849 return (tag_unref(&pf_tags, tag));
850 }
851
/*
 * Resolve a route-label name to its numeric id (route labels exist on
 * OpenBSD only; elsewhere this is a no-op).  Returns -1 if the label
 * cannot be resolved, 0 otherwise.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif
	return (0);
}
862
/* Drop the reference taken by pf_rtlabel_add() (OpenBSD only). */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}
871
/*
 * Translate a route-label id back to its name for export to userland
 * (OpenBSD only); "?" is used when the id no longer resolves.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}
888
#ifdef ALTQ
/*
 * ALTQ queue names are managed with the same allocator as tags, in
 * their own namespace (pf_qids).  Queue ids are 16-bit values widened
 * to 32 bits for the ALTQ API.
 */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
895
896 void
897 pf_qid2qname(u_int32_t qid, char *p)
898 {
899 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
900 }
901
902 void
903 pf_qid_unref(u_int32_t qid)
904 {
905 return (tag_unref(&pf_qids, (u_int16_t)qid));
906 }
907
/*
 * Start an ALTQ configuration transaction: flush anything left on the
 * inactive list from an aborted transaction, hand out a fresh ticket
 * and mark the transaction open.  Returns 0 or an altq_remove() error.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		/* An empty qname marks an interface entry owning a
		 * discipline; named entries only hold a queue id ref. */
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
930
/*
 * Abort an ALTQ transaction: discard the inactive list and close the
 * transaction.  A stale or unopened ticket is silently accepted (the
 * transaction was already superseded).  Mirrors pf_begin_altq()'s
 * purge loop.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
952
/*
 * Commit an ALTQ transaction: swap the inactive list in as active,
 * attach (and if pf_altq_running, enable) the new disciplines, then
 * tear down the previous active list.  Returns EBUSY on a stale
 * ticket, otherwise the first error encountered.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				/*
				 * NOTE(review): this bails out with the old
				 * list unpurged and altqs_inactive_open
				 * still set -- confirm intended recovery.
				 */
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1006
1007 int
1008 pf_enable_altq(struct pf_altq *altq)
1009 {
1010 struct ifnet *ifp;
1011 struct tb_profile tb;
1012 int s, error = 0;
1013
1014 if ((ifp = ifunit(altq->ifname)) == NULL)
1015 return (EINVAL);
1016
1017 if (ifp->if_snd.altq_type != ALTQT_NONE)
1018 error = altq_enable(&ifp->if_snd);
1019
1020 /* set tokenbucket regulator */
1021 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1022 tb.rate = altq->ifbandwidth;
1023 tb.depth = altq->tbrsize;
1024 #ifdef __NetBSD__
1025 s = splnet();
1026 #else
1027 s = splimp();
1028 #endif
1029 error = tbr_set(&ifp->if_snd, &tb);
1030 splx(s);
1031 }
1032
1033 return (error);
1034 }
1035
/*
 * Disable queueing on the interface named in `altq' and clear its
 * token-bucket regulator.  Returns EINVAL if the interface does not
 * exist, 0 if the discipline was already replaced, otherwise the
 * altq_disable()/tbr_set() result.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
#ifdef __NetBSD__
		s = splnet();
#else
		s = splimp();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1069 #endif /* ALTQ */
1070
1071 int
1072 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1073 {
1074 struct pf_ruleset *rs;
1075 struct pf_rule *rule;
1076
1077 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1078 return (EINVAL);
1079 rs = pf_find_or_create_ruleset(anchor);
1080 if (rs == NULL)
1081 return (EINVAL);
1082 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1083 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1084 *ticket = ++rs->rules[rs_num].inactive.ticket;
1085 rs->rules[rs_num].inactive.open = 1;
1086 return (0);
1087 }
1088
1089 int
1090 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1091 {
1092 struct pf_ruleset *rs;
1093 struct pf_rule *rule;
1094
1095 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1096 return (EINVAL);
1097 rs = pf_find_ruleset(anchor);
1098 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1099 rs->rules[rs_num].inactive.ticket != ticket)
1100 return (0);
1101 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1102 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1103 rs->rules[rs_num].inactive.open = 0;
1104 return (0);
1105 }
1106
/*
 * Commit a rule transaction: atomically (under splsoftnet) swap the
 * inactive queue in as active, recompute skip steps, then purge the
 * previous active rules and reap the ruleset if it became empty.
 * Returns EBUSY on a stale ticket or unopened queue.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
1140
1141 int
1142 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1143 {
1144 struct pf_pooladdr *pa = NULL;
1145 struct pf_pool *pool = NULL;
1146 int s;
1147 int error = 0;
1148
1149 /* XXX keep in sync with switch() below */
1150 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1151 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1152 switch (cmd) {
1153 case DIOCGETRULES:
1154 case DIOCGETRULE:
1155 case DIOCGETADDRS:
1156 case DIOCGETADDR:
1157 case DIOCGETSTATE:
1158 case DIOCSETSTATUSIF:
1159 case DIOCGETSTATUS:
1160 case DIOCCLRSTATUS:
1161 case DIOCNATLOOK:
1162 case DIOCSETDEBUG:
1163 case DIOCGETSTATES:
1164 case DIOCGETTIMEOUT:
1165 case DIOCCLRRULECTRS:
1166 case DIOCGETLIMIT:
1167 case DIOCGETALTQS:
1168 case DIOCGETALTQ:
1169 case DIOCGETQSTATS:
1170 case DIOCGETRULESETS:
1171 case DIOCGETRULESET:
1172 case DIOCRGETTABLES:
1173 case DIOCRGETTSTATS:
1174 case DIOCRCLRTSTATS:
1175 case DIOCRCLRADDRS:
1176 case DIOCRADDADDRS:
1177 case DIOCRDELADDRS:
1178 case DIOCRSETADDRS:
1179 case DIOCRGETADDRS:
1180 case DIOCRGETASTATS:
1181 case DIOCRCLRASTATS:
1182 case DIOCRTSTADDRS:
1183 case DIOCOSFPGET:
1184 case DIOCGETSRCNODES:
1185 case DIOCCLRSRCNODES:
1186 case DIOCIGETIFACES:
1187 case DIOCICLRISTATS:
1188 case DIOCSETIFFLAG:
1189 case DIOCCLRIFFLAG:
1190 break;
1191 case DIOCRCLRTABLES:
1192 case DIOCRADDTABLES:
1193 case DIOCRDELTABLES:
1194 case DIOCRSETTFLAGS:
1195 if (((struct pfioc_table *)addr)->pfrio_flags &
1196 PFR_FLAG_DUMMY)
1197 break; /* dummy operation ok */
1198 return (EPERM);
1199 default:
1200 return (EPERM);
1201 }
1202
1203 if (!(flags & FWRITE))
1204 switch (cmd) {
1205 case DIOCGETRULES:
1206 case DIOCGETRULE:
1207 case DIOCGETADDRS:
1208 case DIOCGETADDR:
1209 case DIOCGETSTATE:
1210 case DIOCGETSTATUS:
1211 case DIOCGETSTATES:
1212 case DIOCGETTIMEOUT:
1213 case DIOCGETLIMIT:
1214 case DIOCGETALTQS:
1215 case DIOCGETALTQ:
1216 case DIOCGETQSTATS:
1217 case DIOCGETRULESETS:
1218 case DIOCGETRULESET:
1219 case DIOCRGETTABLES:
1220 case DIOCRGETTSTATS:
1221 case DIOCRGETADDRS:
1222 case DIOCRGETASTATS:
1223 case DIOCRTSTADDRS:
1224 case DIOCOSFPGET:
1225 case DIOCGETSRCNODES:
1226 case DIOCIGETIFACES:
1227 break;
1228 case DIOCRCLRTABLES:
1229 case DIOCRADDTABLES:
1230 case DIOCRDELTABLES:
1231 case DIOCRCLRTSTATS:
1232 case DIOCRCLRADDRS:
1233 case DIOCRADDADDRS:
1234 case DIOCRDELADDRS:
1235 case DIOCRSETADDRS:
1236 case DIOCRSETTFLAGS:
1237 if (((struct pfioc_table *)addr)->pfrio_flags &
1238 PFR_FLAG_DUMMY)
1239 break; /* dummy operation ok */
1240 return (EACCES);
1241 default:
1242 return (EACCES);
1243 }
1244
1245 s = splsoftnet();
1246 switch (cmd) {
1247
1248 case DIOCSTART:
1249 if (pf_status.running)
1250 error = EEXIST;
1251 else {
1252 #ifdef __NetBSD__
1253 error = pf_pfil_attach();
1254 if (error)
1255 break;
1256 #endif
1257 pf_status.running = 1;
1258 pf_status.since = time_second;
1259 if (pf_status.stateid == 0) {
1260 pf_status.stateid = time_second;
1261 pf_status.stateid = pf_status.stateid << 32;
1262 }
1263 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1264 }
1265 break;
1266
1267 case DIOCSTOP:
1268 if (!pf_status.running)
1269 error = ENOENT;
1270 else {
1271 #ifdef __NetBSD__
1272 error = pf_pfil_detach();
1273 if (error)
1274 break;
1275 #endif
1276 pf_status.running = 0;
1277 pf_status.since = time_second;
1278 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1279 }
1280 break;
1281
1282 case DIOCADDRULE: {
1283 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1284 struct pf_ruleset *ruleset;
1285 struct pf_rule *rule, *tail;
1286 struct pf_pooladdr *pa;
1287 int rs_num;
1288
1289 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1290 ruleset = pf_find_ruleset(pr->anchor);
1291 if (ruleset == NULL) {
1292 error = EINVAL;
1293 break;
1294 }
1295 rs_num = pf_get_ruleset_number(pr->rule.action);
1296 if (rs_num >= PF_RULESET_MAX) {
1297 error = EINVAL;
1298 break;
1299 }
1300 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1301 error = EINVAL;
1302 break;
1303 }
1304 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1305 error = EBUSY;
1306 break;
1307 }
1308 if (pr->pool_ticket != ticket_pabuf) {
1309 error = EBUSY;
1310 break;
1311 }
1312 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1313 if (rule == NULL) {
1314 error = ENOMEM;
1315 break;
1316 }
1317 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1318 rule->anchor = NULL;
1319 rule->kif = NULL;
1320 TAILQ_INIT(&rule->rpool.list);
1321 /* initialize refcounting */
1322 rule->states = 0;
1323 rule->src_nodes = 0;
1324 rule->entries.tqe_prev = NULL;
1325 #ifndef INET
1326 if (rule->af == AF_INET) {
1327 pool_put(&pf_rule_pl, rule);
1328 error = EAFNOSUPPORT;
1329 break;
1330 }
1331 #endif /* INET */
1332 #ifndef INET6
1333 if (rule->af == AF_INET6) {
1334 pool_put(&pf_rule_pl, rule);
1335 error = EAFNOSUPPORT;
1336 break;
1337 }
1338 #endif /* INET6 */
1339 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1340 pf_rulequeue);
1341 if (tail)
1342 rule->nr = tail->nr + 1;
1343 else
1344 rule->nr = 0;
1345 if (rule->ifname[0]) {
1346 rule->kif = pfi_attach_rule(rule->ifname);
1347 if (rule->kif == NULL) {
1348 pool_put(&pf_rule_pl, rule);
1349 error = EINVAL;
1350 break;
1351 }
1352 }
1353
1354 #ifdef ALTQ
1355 /* set queue IDs */
1356 if (rule->qname[0] != 0) {
1357 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1358 error = EBUSY;
1359 else if (rule->pqname[0] != 0) {
1360 if ((rule->pqid =
1361 pf_qname2qid(rule->pqname)) == 0)
1362 error = EBUSY;
1363 } else
1364 rule->pqid = rule->qid;
1365 }
1366 #endif
1367 if (rule->tagname[0])
1368 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1369 error = EBUSY;
1370 if (rule->match_tagname[0])
1371 if ((rule->match_tag =
1372 pf_tagname2tag(rule->match_tagname)) == 0)
1373 error = EBUSY;
1374 if (rule->rt && !rule->direction)
1375 error = EINVAL;
1376 if (pf_rtlabel_add(&rule->src.addr) ||
1377 pf_rtlabel_add(&rule->dst.addr))
1378 error = EBUSY;
1379 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1380 error = EINVAL;
1381 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1382 error = EINVAL;
1383 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1384 error = EINVAL;
1385 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1386 error = EINVAL;
1387 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1388 error = EINVAL;
1389 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1390 if (pf_tbladdr_setup(ruleset, &pa->addr))
1391 error = EINVAL;
1392
1393 if (rule->overload_tblname[0]) {
1394 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1395 rule->overload_tblname)) == NULL)
1396 error = EINVAL;
1397 else
1398 rule->overload_tbl->pfrkt_flags |=
1399 PFR_TFLAG_ACTIVE;
1400 }
1401
1402 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1403 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1404 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1405 (rule->rt > PF_FASTROUTE)) &&
1406 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1407 error = EINVAL;
1408
1409 if (error) {
1410 pf_rm_rule(NULL, rule);
1411 break;
1412 }
1413 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1414 rule->evaluations = rule->packets = rule->bytes = 0;
1415 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1416 rule, entries);
1417 break;
1418 }
1419
1420 case DIOCGETRULES: {
1421 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1422 struct pf_ruleset *ruleset;
1423 struct pf_rule *tail;
1424 int rs_num;
1425
1426 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1427 ruleset = pf_find_ruleset(pr->anchor);
1428 if (ruleset == NULL) {
1429 error = EINVAL;
1430 break;
1431 }
1432 rs_num = pf_get_ruleset_number(pr->rule.action);
1433 if (rs_num >= PF_RULESET_MAX) {
1434 error = EINVAL;
1435 break;
1436 }
1437 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1438 pf_rulequeue);
1439 if (tail)
1440 pr->nr = tail->nr + 1;
1441 else
1442 pr->nr = 0;
1443 pr->ticket = ruleset->rules[rs_num].active.ticket;
1444 break;
1445 }
1446
1447 case DIOCGETRULE: {
1448 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1449 struct pf_ruleset *ruleset;
1450 struct pf_rule *rule;
1451 int rs_num, i;
1452
1453 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1454 ruleset = pf_find_ruleset(pr->anchor);
1455 if (ruleset == NULL) {
1456 error = EINVAL;
1457 break;
1458 }
1459 rs_num = pf_get_ruleset_number(pr->rule.action);
1460 if (rs_num >= PF_RULESET_MAX) {
1461 error = EINVAL;
1462 break;
1463 }
1464 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1465 error = EBUSY;
1466 break;
1467 }
1468 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1469 while ((rule != NULL) && (rule->nr != pr->nr))
1470 rule = TAILQ_NEXT(rule, entries);
1471 if (rule == NULL) {
1472 error = EBUSY;
1473 break;
1474 }
1475 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1476 if (pf_anchor_copyout(ruleset, rule, pr)) {
1477 error = EBUSY;
1478 break;
1479 }
1480 pfi_dynaddr_copyout(&pr->rule.src.addr);
1481 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1482 pf_tbladdr_copyout(&pr->rule.src.addr);
1483 pf_tbladdr_copyout(&pr->rule.dst.addr);
1484 pf_rtlabel_copyout(&pr->rule.src.addr);
1485 pf_rtlabel_copyout(&pr->rule.dst.addr);
1486 for (i = 0; i < PF_SKIP_COUNT; ++i)
1487 if (rule->skip[i].ptr == NULL)
1488 pr->rule.skip[i].nr = -1;
1489 else
1490 pr->rule.skip[i].nr =
1491 rule->skip[i].ptr->nr;
1492 break;
1493 }
1494
1495 case DIOCCHANGERULE: {
1496 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1497 struct pf_ruleset *ruleset;
1498 struct pf_rule *oldrule = NULL, *newrule = NULL;
1499 u_int32_t nr = 0;
1500 int rs_num;
1501
1502 if (!(pcr->action == PF_CHANGE_REMOVE ||
1503 pcr->action == PF_CHANGE_GET_TICKET) &&
1504 pcr->pool_ticket != ticket_pabuf) {
1505 error = EBUSY;
1506 break;
1507 }
1508
1509 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1510 pcr->action > PF_CHANGE_GET_TICKET) {
1511 error = EINVAL;
1512 break;
1513 }
1514 ruleset = pf_find_ruleset(pcr->anchor);
1515 if (ruleset == NULL) {
1516 error = EINVAL;
1517 break;
1518 }
1519 rs_num = pf_get_ruleset_number(pcr->rule.action);
1520 if (rs_num >= PF_RULESET_MAX) {
1521 error = EINVAL;
1522 break;
1523 }
1524
1525 if (pcr->action == PF_CHANGE_GET_TICKET) {
1526 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1527 break;
1528 } else {
1529 if (pcr->ticket !=
1530 ruleset->rules[rs_num].active.ticket) {
1531 error = EINVAL;
1532 break;
1533 }
1534 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1535 error = EINVAL;
1536 break;
1537 }
1538 }
1539
1540 if (pcr->action != PF_CHANGE_REMOVE) {
1541 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1542 if (newrule == NULL) {
1543 error = ENOMEM;
1544 break;
1545 }
1546 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1547 TAILQ_INIT(&newrule->rpool.list);
1548 /* initialize refcounting */
1549 newrule->states = 0;
1550 newrule->entries.tqe_prev = NULL;
1551 #ifndef INET
1552 if (newrule->af == AF_INET) {
1553 pool_put(&pf_rule_pl, newrule);
1554 error = EAFNOSUPPORT;
1555 break;
1556 }
1557 #endif /* INET */
1558 #ifndef INET6
1559 if (newrule->af == AF_INET6) {
1560 pool_put(&pf_rule_pl, newrule);
1561 error = EAFNOSUPPORT;
1562 break;
1563 }
1564 #endif /* INET6 */
1565 if (newrule->ifname[0]) {
1566 newrule->kif = pfi_attach_rule(newrule->ifname);
1567 if (newrule->kif == NULL) {
1568 pool_put(&pf_rule_pl, newrule);
1569 error = EINVAL;
1570 break;
1571 }
1572 } else
1573 newrule->kif = NULL;
1574
1575 #ifdef ALTQ
1576 /* set queue IDs */
1577 if (newrule->qname[0] != 0) {
1578 if ((newrule->qid =
1579 pf_qname2qid(newrule->qname)) == 0)
1580 error = EBUSY;
1581 else if (newrule->pqname[0] != 0) {
1582 if ((newrule->pqid =
1583 pf_qname2qid(newrule->pqname)) == 0)
1584 error = EBUSY;
1585 } else
1586 newrule->pqid = newrule->qid;
1587 }
1588 #endif /* ALTQ */
1589 if (newrule->tagname[0])
1590 if ((newrule->tag =
1591 pf_tagname2tag(newrule->tagname)) == 0)
1592 error = EBUSY;
1593 if (newrule->match_tagname[0])
1594 if ((newrule->match_tag = pf_tagname2tag(
1595 newrule->match_tagname)) == 0)
1596 error = EBUSY;
1597 if (newrule->rt && !newrule->direction)
1598 error = EINVAL;
1599 if (pf_rtlabel_add(&newrule->src.addr) ||
1600 pf_rtlabel_add(&newrule->dst.addr))
1601 error = EBUSY;
1602 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1603 error = EINVAL;
1604 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1605 error = EINVAL;
1606 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1607 error = EINVAL;
1608 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1609 error = EINVAL;
1610 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1611 error = EINVAL;
1612
1613 if (newrule->overload_tblname[0]) {
1614 if ((newrule->overload_tbl = pfr_attach_table(
1615 ruleset, newrule->overload_tblname)) ==
1616 NULL)
1617 error = EINVAL;
1618 else
1619 newrule->overload_tbl->pfrkt_flags |=
1620 PFR_TFLAG_ACTIVE;
1621 }
1622
1623 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1624 if (((((newrule->action == PF_NAT) ||
1625 (newrule->action == PF_RDR) ||
1626 (newrule->action == PF_BINAT) ||
1627 (newrule->rt > PF_FASTROUTE)) &&
1628 !pcr->anchor[0])) &&
1629 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1630 error = EINVAL;
1631
1632 if (error) {
1633 pf_rm_rule(NULL, newrule);
1634 break;
1635 }
1636 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1637 newrule->evaluations = newrule->packets = 0;
1638 newrule->bytes = 0;
1639 }
1640 pf_empty_pool(&pf_pabuf);
1641
1642 if (pcr->action == PF_CHANGE_ADD_HEAD)
1643 oldrule = TAILQ_FIRST(
1644 ruleset->rules[rs_num].active.ptr);
1645 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1646 oldrule = TAILQ_LAST(
1647 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1648 else {
1649 oldrule = TAILQ_FIRST(
1650 ruleset->rules[rs_num].active.ptr);
1651 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1652 oldrule = TAILQ_NEXT(oldrule, entries);
1653 if (oldrule == NULL) {
1654 if (newrule != NULL)
1655 pf_rm_rule(NULL, newrule);
1656 error = EINVAL;
1657 break;
1658 }
1659 }
1660
1661 if (pcr->action == PF_CHANGE_REMOVE)
1662 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1663 else {
1664 if (oldrule == NULL)
1665 TAILQ_INSERT_TAIL(
1666 ruleset->rules[rs_num].active.ptr,
1667 newrule, entries);
1668 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1669 pcr->action == PF_CHANGE_ADD_BEFORE)
1670 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1671 else
1672 TAILQ_INSERT_AFTER(
1673 ruleset->rules[rs_num].active.ptr,
1674 oldrule, newrule, entries);
1675 }
1676
1677 nr = 0;
1678 TAILQ_FOREACH(oldrule,
1679 ruleset->rules[rs_num].active.ptr, entries)
1680 oldrule->nr = nr++;
1681
1682 ruleset->rules[rs_num].active.ticket++;
1683
1684 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1685 pf_remove_if_empty_ruleset(ruleset);
1686
1687 break;
1688 }
1689
1690 case DIOCCLRSTATES: {
1691 struct pf_state *state;
1692 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1693 int killed = 0;
1694
1695 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1696 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1697 state->u.s.kif->pfik_name)) {
1698 state->timeout = PFTM_PURGE;
1699 #if NPFSYNC
1700 /* don't send out individual delete messages */
1701 state->sync_flags = PFSTATE_NOSYNC;
1702 #endif
1703 killed++;
1704 }
1705 }
1706 pf_purge_expired_states();
1707 pf_status.states = 0;
1708 psk->psk_af = killed;
1709 #if NPFSYNC
1710 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1711 #endif
1712 break;
1713 }
1714
1715 case DIOCKILLSTATES: {
1716 struct pf_state *state;
1717 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1718 int killed = 0;
1719
1720 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1721 if ((!psk->psk_af || state->af == psk->psk_af)
1722 && (!psk->psk_proto || psk->psk_proto ==
1723 state->proto) &&
1724 PF_MATCHA(psk->psk_src.neg,
1725 &psk->psk_src.addr.v.a.addr,
1726 &psk->psk_src.addr.v.a.mask,
1727 &state->lan.addr, state->af) &&
1728 PF_MATCHA(psk->psk_dst.neg,
1729 &psk->psk_dst.addr.v.a.addr,
1730 &psk->psk_dst.addr.v.a.mask,
1731 &state->ext.addr, state->af) &&
1732 (psk->psk_src.port_op == 0 ||
1733 pf_match_port(psk->psk_src.port_op,
1734 psk->psk_src.port[0], psk->psk_src.port[1],
1735 state->lan.port)) &&
1736 (psk->psk_dst.port_op == 0 ||
1737 pf_match_port(psk->psk_dst.port_op,
1738 psk->psk_dst.port[0], psk->psk_dst.port[1],
1739 state->ext.port)) &&
1740 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1741 state->u.s.kif->pfik_name))) {
1742 state->timeout = PFTM_PURGE;
1743 killed++;
1744 }
1745 }
1746 pf_purge_expired_states();
1747 psk->psk_af = killed;
1748 break;
1749 }
1750
1751 case DIOCADDSTATE: {
1752 struct pfioc_state *ps = (struct pfioc_state *)addr;
1753 struct pf_state *state;
1754 struct pfi_kif *kif;
1755
1756 if (ps->state.timeout >= PFTM_MAX &&
1757 ps->state.timeout != PFTM_UNTIL_PACKET) {
1758 error = EINVAL;
1759 break;
1760 }
1761 state = pool_get(&pf_state_pl, PR_NOWAIT);
1762 if (state == NULL) {
1763 error = ENOMEM;
1764 break;
1765 }
1766 kif = pfi_lookup_create(ps->state.u.ifname);
1767 if (kif == NULL) {
1768 pool_put(&pf_state_pl, state);
1769 error = ENOENT;
1770 break;
1771 }
1772 bcopy(&ps->state, state, sizeof(struct pf_state));
1773 bzero(&state->u, sizeof(state->u));
1774 state->rule.ptr = &pf_default_rule;
1775 state->nat_rule.ptr = NULL;
1776 state->anchor.ptr = NULL;
1777 state->rt_kif = NULL;
1778 state->creation = time_second;
1779 state->pfsync_time = 0;
1780 state->packets[0] = state->packets[1] = 0;
1781 state->bytes[0] = state->bytes[1] = 0;
1782
1783 if (pf_insert_state(kif, state)) {
1784 pfi_maybe_destroy(kif);
1785 pool_put(&pf_state_pl, state);
1786 error = ENOMEM;
1787 }
1788 break;
1789 }
1790
1791 case DIOCGETSTATE: {
1792 struct pfioc_state *ps = (struct pfioc_state *)addr;
1793 struct pf_state *state;
1794 u_int32_t nr;
1795
1796 nr = 0;
1797 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1798 if (nr >= ps->nr)
1799 break;
1800 nr++;
1801 }
1802 if (state == NULL) {
1803 error = EBUSY;
1804 break;
1805 }
1806 bcopy(state, &ps->state, sizeof(struct pf_state));
1807 ps->state.rule.nr = state->rule.ptr->nr;
1808 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1809 -1 : state->nat_rule.ptr->nr;
1810 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1811 -1 : state->anchor.ptr->nr;
1812 ps->state.expire = pf_state_expires(state);
1813 if (ps->state.expire > time_second)
1814 ps->state.expire -= time_second;
1815 else
1816 ps->state.expire = 0;
1817 break;
1818 }
1819
1820 case DIOCGETSTATES: {
1821 struct pfioc_states *ps = (struct pfioc_states *)addr;
1822 struct pf_state *state;
1823 struct pf_state *p, pstore;
1824 struct pfi_kif *kif;
1825 u_int32_t nr = 0;
1826 int space = ps->ps_len;
1827
1828 if (space == 0) {
1829 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1830 nr += kif->pfik_states;
1831 ps->ps_len = sizeof(struct pf_state) * nr;
1832 break;
1833 }
1834
1835 p = ps->ps_states;
1836 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1837 RB_FOREACH(state, pf_state_tree_ext_gwy,
1838 &kif->pfik_ext_gwy) {
1839 int secs = time_second;
1840
1841 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1842 break;
1843
1844 bcopy(state, &pstore, sizeof(pstore));
1845 strlcpy(pstore.u.ifname, kif->pfik_name,
1846 sizeof(pstore.u.ifname));
1847 pstore.rule.nr = state->rule.ptr->nr;
1848 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1849 NULL) ? -1 : state->nat_rule.ptr->nr;
1850 pstore.anchor.nr = (state->anchor.ptr ==
1851 NULL) ? -1 : state->anchor.ptr->nr;
1852 pstore.creation = secs - pstore.creation;
1853 pstore.expire = pf_state_expires(state);
1854 if (pstore.expire > secs)
1855 pstore.expire -= secs;
1856 else
1857 pstore.expire = 0;
1858 error = copyout(&pstore, p, sizeof(*p));
1859 if (error)
1860 goto fail;
1861 p++;
1862 nr++;
1863 }
1864 ps->ps_len = sizeof(struct pf_state) * nr;
1865 break;
1866 }
1867
1868 case DIOCGETSTATUS: {
1869 struct pf_status *s = (struct pf_status *)addr;
1870 bcopy(&pf_status, s, sizeof(struct pf_status));
1871 pfi_fill_oldstatus(s);
1872 break;
1873 }
1874
1875 case DIOCSETSTATUSIF: {
1876 struct pfioc_if *pi = (struct pfioc_if *)addr;
1877
1878 if (pi->ifname[0] == 0) {
1879 bzero(pf_status.ifname, IFNAMSIZ);
1880 break;
1881 }
1882 if (ifunit(pi->ifname) == NULL) {
1883 error = EINVAL;
1884 break;
1885 }
1886 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1887 break;
1888 }
1889
1890 case DIOCCLRSTATUS: {
1891 bzero(pf_status.counters, sizeof(pf_status.counters));
1892 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1893 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1894 if (*pf_status.ifname)
1895 pfi_clr_istats(pf_status.ifname, NULL,
1896 PFI_FLAG_INSTANCE);
1897 break;
1898 }
1899
1900 case DIOCNATLOOK: {
1901 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1902 struct pf_state *state;
1903 struct pf_state key;
1904 int m = 0, direction = pnl->direction;
1905
1906 key.af = pnl->af;
1907 key.proto = pnl->proto;
1908
1909 if (!pnl->proto ||
1910 PF_AZERO(&pnl->saddr, pnl->af) ||
1911 PF_AZERO(&pnl->daddr, pnl->af) ||
1912 !pnl->dport || !pnl->sport)
1913 error = EINVAL;
1914 else {
1915 /*
1916 * userland gives us source and dest of connection,
1917 * reverse the lookup so we ask for what happens with
1918 * the return traffic, enabling us to find it in the
1919 * state tree.
1920 */
1921 if (direction == PF_IN) {
1922 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1923 key.ext.port = pnl->dport;
1924 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1925 key.gwy.port = pnl->sport;
1926 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1927 } else {
1928 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1929 key.lan.port = pnl->dport;
1930 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1931 key.ext.port = pnl->sport;
1932 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1933 }
1934 if (m > 1)
1935 error = E2BIG; /* more than one state */
1936 else if (state != NULL) {
1937 if (direction == PF_IN) {
1938 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1939 state->af);
1940 pnl->rsport = state->lan.port;
1941 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1942 pnl->af);
1943 pnl->rdport = pnl->dport;
1944 } else {
1945 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1946 state->af);
1947 pnl->rdport = state->gwy.port;
1948 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1949 pnl->af);
1950 pnl->rsport = pnl->sport;
1951 }
1952 } else
1953 error = ENOENT;
1954 }
1955 break;
1956 }
1957
1958 case DIOCSETTIMEOUT: {
1959 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1960 int old;
1961
1962 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1963 pt->seconds < 0) {
1964 error = EINVAL;
1965 goto fail;
1966 }
1967 old = pf_default_rule.timeout[pt->timeout];
1968 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1969 pt->seconds = old;
1970 break;
1971 }
1972
1973 case DIOCGETTIMEOUT: {
1974 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1975
1976 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1977 error = EINVAL;
1978 goto fail;
1979 }
1980 pt->seconds = pf_default_rule.timeout[pt->timeout];
1981 break;
1982 }
1983
1984 case DIOCGETLIMIT: {
1985 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1986
1987 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1988 error = EINVAL;
1989 goto fail;
1990 }
1991 pl->limit = pf_pool_limits[pl->index].limit;
1992 break;
1993 }
1994
1995 case DIOCSETLIMIT: {
1996 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1997 int old_limit;
1998
1999 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2000 pf_pool_limits[pl->index].pp == NULL) {
2001 error = EINVAL;
2002 goto fail;
2003 }
2004 #ifdef __OpenBSD__
2005 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2006 pl->limit, NULL, 0) != 0) {
2007 error = EBUSY;
2008 goto fail;
2009 }
2010 #else
2011 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2012 pl->limit, NULL, 0);
2013 #endif
2014 old_limit = pf_pool_limits[pl->index].limit;
2015 pf_pool_limits[pl->index].limit = pl->limit;
2016 pl->limit = old_limit;
2017 break;
2018 }
2019
2020 case DIOCSETDEBUG: {
2021 u_int32_t *level = (u_int32_t *)addr;
2022
2023 pf_status.debug = *level;
2024 break;
2025 }
2026
2027 case DIOCCLRRULECTRS: {
2028 struct pf_ruleset *ruleset = &pf_main_ruleset;
2029 struct pf_rule *rule;
2030
2031 TAILQ_FOREACH(rule,
2032 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2033 rule->evaluations = rule->packets =
2034 rule->bytes = 0;
2035 break;
2036 }
2037
2038 #ifdef ALTQ
2039 case DIOCSTARTALTQ: {
2040 struct pf_altq *altq;
2041
2042 /* enable all altq interfaces on active list */
2043 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2044 if (altq->qname[0] == 0) {
2045 error = pf_enable_altq(altq);
2046 if (error != 0)
2047 break;
2048 }
2049 }
2050 if (error == 0)
2051 pf_altq_running = 1;
2052 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2053 break;
2054 }
2055
2056 case DIOCSTOPALTQ: {
2057 struct pf_altq *altq;
2058
2059 /* disable all altq interfaces on active list */
2060 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2061 if (altq->qname[0] == 0) {
2062 error = pf_disable_altq(altq);
2063 if (error != 0)
2064 break;
2065 }
2066 }
2067 if (error == 0)
2068 pf_altq_running = 0;
2069 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2070 break;
2071 }
2072
2073 case DIOCADDALTQ: {
2074 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2075 struct pf_altq *altq, *a;
2076
2077 if (pa->ticket != ticket_altqs_inactive) {
2078 error = EBUSY;
2079 break;
2080 }
2081 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2082 if (altq == NULL) {
2083 error = ENOMEM;
2084 break;
2085 }
2086 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2087
2088 /*
2089 * if this is for a queue, find the discipline and
2090 * copy the necessary fields
2091 */
2092 if (altq->qname[0] != 0) {
2093 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2094 error = EBUSY;
2095 pool_put(&pf_altq_pl, altq);
2096 break;
2097 }
2098 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2099 if (strncmp(a->ifname, altq->ifname,
2100 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2101 altq->altq_disc = a->altq_disc;
2102 break;
2103 }
2104 }
2105 }
2106
2107 error = altq_add(altq);
2108 if (error) {
2109 pool_put(&pf_altq_pl, altq);
2110 break;
2111 }
2112
2113 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2114 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2115 break;
2116 }
2117
2118 case DIOCGETALTQS: {
2119 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2120 struct pf_altq *altq;
2121
2122 pa->nr = 0;
2123 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2124 pa->nr++;
2125 pa->ticket = ticket_altqs_active;
2126 break;
2127 }
2128
2129 case DIOCGETALTQ: {
2130 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2131 struct pf_altq *altq;
2132 u_int32_t nr;
2133
2134 if (pa->ticket != ticket_altqs_active) {
2135 error = EBUSY;
2136 break;
2137 }
2138 nr = 0;
2139 altq = TAILQ_FIRST(pf_altqs_active);
2140 while ((altq != NULL) && (nr < pa->nr)) {
2141 altq = TAILQ_NEXT(altq, entries);
2142 nr++;
2143 }
2144 if (altq == NULL) {
2145 error = EBUSY;
2146 break;
2147 }
2148 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2149 break;
2150 }
2151
2152 case DIOCCHANGEALTQ:
2153 /* CHANGEALTQ not supported yet! */
2154 error = ENODEV;
2155 break;
2156
2157 case DIOCGETQSTATS: {
2158 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2159 struct pf_altq *altq;
2160 u_int32_t nr;
2161 int nbytes;
2162
2163 if (pq->ticket != ticket_altqs_active) {
2164 error = EBUSY;
2165 break;
2166 }
2167 nbytes = pq->nbytes;
2168 nr = 0;
2169 altq = TAILQ_FIRST(pf_altqs_active);
2170 while ((altq != NULL) && (nr < pq->nr)) {
2171 altq = TAILQ_NEXT(altq, entries);
2172 nr++;
2173 }
2174 if (altq == NULL) {
2175 error = EBUSY;
2176 break;
2177 }
2178 error = altq_getqstats(altq, pq->buf, &nbytes);
2179 if (error == 0) {
2180 pq->scheduler = altq->scheduler;
2181 pq->nbytes = nbytes;
2182 }
2183 break;
2184 }
2185 #endif /* ALTQ */
2186
2187 case DIOCBEGINADDRS: {
2188 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2189
2190 pf_empty_pool(&pf_pabuf);
2191 pp->ticket = ++ticket_pabuf;
2192 break;
2193 }
2194
2195 case DIOCADDADDR: {
2196 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2197
2198 #ifndef INET
2199 if (pp->af == AF_INET) {
2200 error = EAFNOSUPPORT;
2201 break;
2202 }
2203 #endif /* INET */
2204 #ifndef INET6
2205 if (pp->af == AF_INET6) {
2206 error = EAFNOSUPPORT;
2207 break;
2208 }
2209 #endif /* INET6 */
2210 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2211 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2212 pp->addr.addr.type != PF_ADDR_TABLE) {
2213 error = EINVAL;
2214 break;
2215 }
2216 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2217 if (pa == NULL) {
2218 error = ENOMEM;
2219 break;
2220 }
2221 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2222 if (pa->ifname[0]) {
2223 pa->kif = pfi_attach_rule(pa->ifname);
2224 if (pa->kif == NULL) {
2225 pool_put(&pf_pooladdr_pl, pa);
2226 error = EINVAL;
2227 break;
2228 }
2229 }
2230 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2231 pfi_dynaddr_remove(&pa->addr);
2232 pfi_detach_rule(pa->kif);
2233 pool_put(&pf_pooladdr_pl, pa);
2234 error = EINVAL;
2235 break;
2236 }
2237 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2238 break;
2239 }
2240
2241 case DIOCGETADDRS: {
2242 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2243
2244 pp->nr = 0;
2245 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2246 pp->r_num, 0, 1, 0);
2247 if (pool == NULL) {
2248 error = EBUSY;
2249 break;
2250 }
2251 TAILQ_FOREACH(pa, &pool->list, entries)
2252 pp->nr++;
2253 break;
2254 }
2255
2256 case DIOCGETADDR: {
2257 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2258 u_int32_t nr = 0;
2259
2260 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2261 pp->r_num, 0, 1, 1);
2262 if (pool == NULL) {
2263 error = EBUSY;
2264 break;
2265 }
2266 pa = TAILQ_FIRST(&pool->list);
2267 while ((pa != NULL) && (nr < pp->nr)) {
2268 pa = TAILQ_NEXT(pa, entries);
2269 nr++;
2270 }
2271 if (pa == NULL) {
2272 error = EBUSY;
2273 break;
2274 }
2275 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2276 pfi_dynaddr_copyout(&pp->addr.addr);
2277 pf_tbladdr_copyout(&pp->addr.addr);
2278 pf_rtlabel_copyout(&pp->addr.addr);
2279 break;
2280 }
2281
2282 case DIOCCHANGEADDR: {
2283 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2284 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2285 struct pf_ruleset *ruleset;
2286
2287 if (pca->action < PF_CHANGE_ADD_HEAD ||
2288 pca->action > PF_CHANGE_REMOVE) {
2289 error = EINVAL;
2290 break;
2291 }
2292 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2293 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2294 pca->addr.addr.type != PF_ADDR_TABLE) {
2295 error = EINVAL;
2296 break;
2297 }
2298
2299 ruleset = pf_find_ruleset(pca->anchor);
2300 if (ruleset == NULL) {
2301 error = EBUSY;
2302 break;
2303 }
2304 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2305 pca->r_num, pca->r_last, 1, 1);
2306 if (pool == NULL) {
2307 error = EBUSY;
2308 break;
2309 }
2310 if (pca->action != PF_CHANGE_REMOVE) {
2311 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2312 if (newpa == NULL) {
2313 error = ENOMEM;
2314 break;
2315 }
2316 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2317 #ifndef INET
2318 if (pca->af == AF_INET) {
2319 pool_put(&pf_pooladdr_pl, newpa);
2320 error = EAFNOSUPPORT;
2321 break;
2322 }
2323 #endif /* INET */
2324 #ifndef INET6
2325 if (pca->af == AF_INET6) {
2326 pool_put(&pf_pooladdr_pl, newpa);
2327 error = EAFNOSUPPORT;
2328 break;
2329 }
2330 #endif /* INET6 */
2331 if (newpa->ifname[0]) {
2332 newpa->kif = pfi_attach_rule(newpa->ifname);
2333 if (newpa->kif == NULL) {
2334 pool_put(&pf_pooladdr_pl, newpa);
2335 error = EINVAL;
2336 break;
2337 }
2338 } else
2339 newpa->kif = NULL;
2340 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2341 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2342 pfi_dynaddr_remove(&newpa->addr);
2343 pfi_detach_rule(newpa->kif);
2344 pool_put(&pf_pooladdr_pl, newpa);
2345 error = EINVAL;
2346 break;
2347 }
2348 }
2349
2350 if (pca->action == PF_CHANGE_ADD_HEAD)
2351 oldpa = TAILQ_FIRST(&pool->list);
2352 else if (pca->action == PF_CHANGE_ADD_TAIL)
2353 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2354 else {
2355 int i = 0;
2356
2357 oldpa = TAILQ_FIRST(&pool->list);
2358 while ((oldpa != NULL) && (i < pca->nr)) {
2359 oldpa = TAILQ_NEXT(oldpa, entries);
2360 i++;
2361 }
2362 if (oldpa == NULL) {
2363 error = EINVAL;
2364 break;
2365 }
2366 }
2367
2368 if (pca->action == PF_CHANGE_REMOVE) {
2369 TAILQ_REMOVE(&pool->list, oldpa, entries);
2370 pfi_dynaddr_remove(&oldpa->addr);
2371 pf_tbladdr_remove(&oldpa->addr);
2372 pfi_detach_rule(oldpa->kif);
2373 pool_put(&pf_pooladdr_pl, oldpa);
2374 } else {
2375 if (oldpa == NULL)
2376 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2377 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2378 pca->action == PF_CHANGE_ADD_BEFORE)
2379 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2380 else
2381 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2382 newpa, entries);
2383 }
2384
2385 pool->cur = TAILQ_FIRST(&pool->list);
2386 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2387 pca->af);
2388 break;
2389 }
2390
2391 case DIOCGETRULESETS: {
2392 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2393 struct pf_ruleset *ruleset;
2394 struct pf_anchor *anchor;
2395
2396 pr->path[sizeof(pr->path) - 1] = 0;
2397 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2398 error = EINVAL;
2399 break;
2400 }
2401 pr->nr = 0;
2402 if (ruleset->anchor == NULL) {
2403 /* XXX kludge for pf_main_ruleset */
2404 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2405 if (anchor->parent == NULL)
2406 pr->nr++;
2407 } else {
2408 RB_FOREACH(anchor, pf_anchor_node,
2409 &ruleset->anchor->children)
2410 pr->nr++;
2411 }
2412 break;
2413 }
2414
2415 case DIOCGETRULESET: {
2416 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2417 struct pf_ruleset *ruleset;
2418 struct pf_anchor *anchor;
2419 u_int32_t nr = 0;
2420
2421 pr->path[sizeof(pr->path) - 1] = 0;
2422 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2423 error = EINVAL;
2424 break;
2425 }
2426 pr->name[0] = 0;
2427 if (ruleset->anchor == NULL) {
2428 /* XXX kludge for pf_main_ruleset */
2429 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2430 if (anchor->parent == NULL && nr++ == pr->nr) {
2431 strlcpy(pr->name, anchor->name,
2432 sizeof(pr->name));
2433 break;
2434 }
2435 } else {
2436 RB_FOREACH(anchor, pf_anchor_node,
2437 &ruleset->anchor->children)
2438 if (nr++ == pr->nr) {
2439 strlcpy(pr->name, anchor->name,
2440 sizeof(pr->name));
2441 break;
2442 }
2443 }
2444 if (!pr->name[0])
2445 error = EBUSY;
2446 break;
2447 }
2448
2449 case DIOCRCLRTABLES: {
2450 struct pfioc_table *io = (struct pfioc_table *)addr;
2451
2452 if (io->pfrio_esize != 0) {
2453 error = ENODEV;
2454 break;
2455 }
2456 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2457 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2458 break;
2459 }
2460
2461 case DIOCRADDTABLES: {
2462 struct pfioc_table *io = (struct pfioc_table *)addr;
2463
2464 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2465 error = ENODEV;
2466 break;
2467 }
2468 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2469 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2470 break;
2471 }
2472
2473 case DIOCRDELTABLES: {
2474 struct pfioc_table *io = (struct pfioc_table *)addr;
2475
2476 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2477 error = ENODEV;
2478 break;
2479 }
2480 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2481 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2482 break;
2483 }
2484
2485 case DIOCRGETTABLES: {
2486 struct pfioc_table *io = (struct pfioc_table *)addr;
2487
2488 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2489 error = ENODEV;
2490 break;
2491 }
2492 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2493 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2494 break;
2495 }
2496
2497 case DIOCRGETTSTATS: {
2498 struct pfioc_table *io = (struct pfioc_table *)addr;
2499
2500 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2501 error = ENODEV;
2502 break;
2503 }
2504 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2505 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2506 break;
2507 }
2508
2509 case DIOCRCLRTSTATS: {
2510 struct pfioc_table *io = (struct pfioc_table *)addr;
2511
2512 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2513 error = ENODEV;
2514 break;
2515 }
2516 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2517 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2518 break;
2519 }
2520
2521 case DIOCRSETTFLAGS: {
2522 struct pfioc_table *io = (struct pfioc_table *)addr;
2523
2524 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2525 error = ENODEV;
2526 break;
2527 }
2528 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2529 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2530 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2531 break;
2532 }
2533
2534 case DIOCRCLRADDRS: {
2535 struct pfioc_table *io = (struct pfioc_table *)addr;
2536
2537 if (io->pfrio_esize != 0) {
2538 error = ENODEV;
2539 break;
2540 }
2541 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2542 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2543 break;
2544 }
2545
2546 case DIOCRADDADDRS: {
2547 struct pfioc_table *io = (struct pfioc_table *)addr;
2548
2549 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2550 error = ENODEV;
2551 break;
2552 }
2553 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2554 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2555 PFR_FLAG_USERIOCTL);
2556 break;
2557 }
2558
2559 case DIOCRDELADDRS: {
2560 struct pfioc_table *io = (struct pfioc_table *)addr;
2561
2562 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2563 error = ENODEV;
2564 break;
2565 }
2566 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2567 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2568 PFR_FLAG_USERIOCTL);
2569 break;
2570 }
2571
2572 case DIOCRSETADDRS: {
2573 struct pfioc_table *io = (struct pfioc_table *)addr;
2574
2575 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2576 error = ENODEV;
2577 break;
2578 }
2579 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2580 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2581 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2582 PFR_FLAG_USERIOCTL);
2583 break;
2584 }
2585
2586 case DIOCRGETADDRS: {
2587 struct pfioc_table *io = (struct pfioc_table *)addr;
2588
2589 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2590 error = ENODEV;
2591 break;
2592 }
2593 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2594 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2595 break;
2596 }
2597
2598 case DIOCRGETASTATS: {
2599 struct pfioc_table *io = (struct pfioc_table *)addr;
2600
2601 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2602 error = ENODEV;
2603 break;
2604 }
2605 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2606 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2607 break;
2608 }
2609
2610 case DIOCRCLRASTATS: {
2611 struct pfioc_table *io = (struct pfioc_table *)addr;
2612
2613 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2614 error = ENODEV;
2615 break;
2616 }
2617 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2618 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2619 PFR_FLAG_USERIOCTL);
2620 break;
2621 }
2622
2623 case DIOCRTSTADDRS: {
2624 struct pfioc_table *io = (struct pfioc_table *)addr;
2625
2626 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2627 error = ENODEV;
2628 break;
2629 }
2630 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2631 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2632 PFR_FLAG_USERIOCTL);
2633 break;
2634 }
2635
2636 case DIOCRINADEFINE: {
2637 struct pfioc_table *io = (struct pfioc_table *)addr;
2638
2639 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2640 error = ENODEV;
2641 break;
2642 }
2643 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2644 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2645 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2646 break;
2647 }
2648
2649 case DIOCOSFPADD: {
2650 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2651 error = pf_osfp_add(io);
2652 break;
2653 }
2654
2655 case DIOCOSFPGET: {
2656 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2657 error = pf_osfp_get(io);
2658 break;
2659 }
2660
2661 case DIOCXBEGIN: {
2662 struct pfioc_trans *io = (struct pfioc_trans *)
2663 addr;
2664 static struct pfioc_trans_e ioe;
2665 static struct pfr_table table;
2666 int i;
2667
2668 if (io->esize != sizeof(ioe)) {
2669 error = ENODEV;
2670 goto fail;
2671 }
2672 for (i = 0; i < io->size; i++) {
2673 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2674 error = EFAULT;
2675 goto fail;
2676 }
2677 switch (ioe.rs_num) {
2678 #ifdef ALTQ
2679 case PF_RULESET_ALTQ:
2680 if (ioe.anchor[0]) {
2681 error = EINVAL;
2682 goto fail;
2683 }
2684 if ((error = pf_begin_altq(&ioe.ticket)))
2685 goto fail;
2686 break;
2687 #endif /* ALTQ */
2688 case PF_RULESET_TABLE:
2689 bzero(&table, sizeof(table));
2690 strlcpy(table.pfrt_anchor, ioe.anchor,
2691 sizeof(table.pfrt_anchor));
2692 if ((error = pfr_ina_begin(&table,
2693 &ioe.ticket, NULL, 0)))
2694 goto fail;
2695 break;
2696 default:
2697 if ((error = pf_begin_rules(&ioe.ticket,
2698 ioe.rs_num, ioe.anchor)))
2699 goto fail;
2700 break;
2701 }
2702 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2703 error = EFAULT;
2704 goto fail;
2705 }
2706 }
2707 break;
2708 }
2709
2710 case DIOCXROLLBACK: {
2711 struct pfioc_trans *io = (struct pfioc_trans *)
2712 addr;
2713 static struct pfioc_trans_e ioe;
2714 static struct pfr_table table;
2715 int i;
2716
2717 if (io->esize != sizeof(ioe)) {
2718 error = ENODEV;
2719 goto fail;
2720 }
2721 for (i = 0; i < io->size; i++) {
2722 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2723 error = EFAULT;
2724 goto fail;
2725 }
2726 switch (ioe.rs_num) {
2727 #ifdef ALTQ
2728 case PF_RULESET_ALTQ:
2729 if (ioe.anchor[0]) {
2730 error = EINVAL;
2731 goto fail;
2732 }
2733 if ((error = pf_rollback_altq(ioe.ticket)))
2734 goto fail; /* really bad */
2735 break;
2736 #endif /* ALTQ */
2737 case PF_RULESET_TABLE:
2738 bzero(&table, sizeof(table));
2739 strlcpy(table.pfrt_anchor, ioe.anchor,
2740 sizeof(table.pfrt_anchor));
2741 if ((error = pfr_ina_rollback(&table,
2742 ioe.ticket, NULL, 0)))
2743 goto fail; /* really bad */
2744 break;
2745 default:
2746 if ((error = pf_rollback_rules(ioe.ticket,
2747 ioe.rs_num, ioe.anchor)))
2748 goto fail; /* really bad */
2749 break;
2750 }
2751 }
2752 break;
2753 }
2754
2755 case DIOCXCOMMIT: {
2756 struct pfioc_trans *io = (struct pfioc_trans *)
2757 addr;
2758 static struct pfioc_trans_e ioe;
2759 static struct pfr_table table;
2760 struct pf_ruleset *rs;
2761 int i;
2762
2763 if (io->esize != sizeof(ioe)) {
2764 error = ENODEV;
2765 goto fail;
2766 }
2767 /* first makes sure everything will succeed */
2768 for (i = 0; i < io->size; i++) {
2769 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2770 error = EFAULT;
2771 goto fail;
2772 }
2773 switch (ioe.rs_num) {
2774 #ifdef ALTQ
2775 case PF_RULESET_ALTQ:
2776 if (ioe.anchor[0]) {
2777 error = EINVAL;
2778 goto fail;
2779 }
2780 if (!altqs_inactive_open || ioe.ticket !=
2781 ticket_altqs_inactive) {
2782 error = EBUSY;
2783 goto fail;
2784 }
2785 break;
2786 #endif /* ALTQ */
2787 case PF_RULESET_TABLE:
2788 rs = pf_find_ruleset(ioe.anchor);
2789 if (rs == NULL || !rs->topen || ioe.ticket !=
2790 rs->tticket) {
2791 error = EBUSY;
2792 goto fail;
2793 }
2794 break;
2795 default:
2796 if (ioe.rs_num < 0 || ioe.rs_num >=
2797 PF_RULESET_MAX) {
2798 error = EINVAL;
2799 goto fail;
2800 }
2801 rs = pf_find_ruleset(ioe.anchor);
2802 if (rs == NULL ||
2803 !rs->rules[ioe.rs_num].inactive.open ||
2804 rs->rules[ioe.rs_num].inactive.ticket !=
2805 ioe.ticket) {
2806 error = EBUSY;
2807 goto fail;
2808 }
2809 break;
2810 }
2811 }
2812 /* now do the commit - no errors should happen here */
2813 for (i = 0; i < io->size; i++) {
2814 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2815 error = EFAULT;
2816 goto fail;
2817 }
2818 switch (ioe.rs_num) {
2819 #ifdef ALTQ
2820 case PF_RULESET_ALTQ:
2821 if ((error = pf_commit_altq(ioe.ticket)))
2822 goto fail; /* really bad */
2823 break;
2824 #endif /* ALTQ */
2825 case PF_RULESET_TABLE:
2826 bzero(&table, sizeof(table));
2827 strlcpy(table.pfrt_anchor, ioe.anchor,
2828 sizeof(table.pfrt_anchor));
2829 if ((error = pfr_ina_commit(&table, ioe.ticket,
2830 NULL, NULL, 0)))
2831 goto fail; /* really bad */
2832 break;
2833 default:
2834 if ((error = pf_commit_rules(ioe.ticket,
2835 ioe.rs_num, ioe.anchor)))
2836 goto fail; /* really bad */
2837 break;
2838 }
2839 }
2840 break;
2841 }
2842
2843 case DIOCGETSRCNODES: {
2844 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2845 struct pf_src_node *n;
2846 struct pf_src_node *p, pstore;
2847 u_int32_t nr = 0;
2848 int space = psn->psn_len;
2849
2850 if (space == 0) {
2851 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2852 nr++;
2853 psn->psn_len = sizeof(struct pf_src_node) * nr;
2854 break;
2855 }
2856
2857 p = psn->psn_src_nodes;
2858 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2859 int secs = time_second, diff;
2860
2861 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2862 break;
2863
2864 bcopy(n, &pstore, sizeof(pstore));
2865 if (n->rule.ptr != NULL)
2866 pstore.rule.nr = n->rule.ptr->nr;
2867 pstore.creation = secs - pstore.creation;
2868 if (pstore.expire > secs)
2869 pstore.expire -= secs;
2870 else
2871 pstore.expire = 0;
2872
2873 /* adjust the connection rate estimate */
2874 diff = secs - n->conn_rate.last;
2875 if (diff >= n->conn_rate.seconds)
2876 pstore.conn_rate.count = 0;
2877 else
2878 pstore.conn_rate.count -=
2879 n->conn_rate.count * diff /
2880 n->conn_rate.seconds;
2881
2882 error = copyout(&pstore, p, sizeof(*p));
2883 if (error)
2884 goto fail;
2885 p++;
2886 nr++;
2887 }
2888 psn->psn_len = sizeof(struct pf_src_node) * nr;
2889 break;
2890 }
2891
2892 case DIOCCLRSRCNODES: {
2893 struct pf_src_node *n;
2894 struct pf_state *state;
2895
2896 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2897 state->src_node = NULL;
2898 state->nat_src_node = NULL;
2899 }
2900 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2901 n->expire = 1;
2902 n->states = 0;
2903 }
2904 pf_purge_expired_src_nodes();
2905 pf_status.src_nodes = 0;
2906 break;
2907 }
2908
2909 case DIOCSETHOSTID: {
2910 u_int32_t *hostid = (u_int32_t *)addr;
2911
2912 if (*hostid == 0)
2913 pf_status.hostid = arc4random();
2914 else
2915 pf_status.hostid = *hostid;
2916 break;
2917 }
2918
2919 case DIOCOSFPFLUSH:
2920 pf_osfp_flush();
2921 break;
2922
2923 case DIOCIGETIFACES: {
2924 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2925
2926 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2927 error = ENODEV;
2928 break;
2929 }
2930 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2931 &io->pfiio_size, io->pfiio_flags);
2932 break;
2933 }
2934
2935 case DIOCICLRISTATS: {
2936 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2937
2938 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2939 io->pfiio_flags);
2940 break;
2941 }
2942
2943 case DIOCSETIFFLAG: {
2944 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2945
2946 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2947 break;
2948 }
2949
2950 case DIOCCLRIFFLAG: {
2951 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2952
2953 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2954 break;
2955 }
2956
2957 default:
2958 error = ENODEV;
2959 break;
2960 }
2961 fail:
2962 splx(s);
2963 return (error);
2964 }
2965
2966 #ifdef __NetBSD__
2967 #ifdef INET
2968 int
2969 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2970 {
2971 int error;
2972
2973 /*
2974 * ensure that mbufs are writable beforehand
2975 * as it's assumed by pf code.
2976 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2977 * XXX inefficient
2978 */
2979 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2980 if (error) {
2981 m_freem(*mp);
2982 *mp = NULL;
2983 return error;
2984 }
2985
2986 /*
2987 * If the packet is out-bound, we can't delay checksums
2988 * here. For in-bound, the checksum has already been
2989 * validated.
2990 */
2991 if (dir == PFIL_OUT) {
2992 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2993 in_delayed_cksum(*mp);
2994 (*mp)->m_pkthdr.csum_flags &=
2995 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2996 }
2997 }
2998
2999 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3000 != PF_PASS) {
3001 m_freem(*mp);
3002 *mp = NULL;
3003 return EHOSTUNREACH;
3004 }
3005
3006 /*
3007 * we're not compatible with fast-forward.
3008 */
3009
3010 if (dir == PFIL_IN && *mp) {
3011 (*mp)->m_flags &= ~M_CANFASTFWD;
3012 }
3013
3014 return (0);
3015 }
3016 #endif /* INET */
3017
3018 #ifdef INET6
3019 int
3020 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3021 {
3022 int error;
3023
3024 /*
3025 * ensure that mbufs are writable beforehand
3026 * as it's assumed by pf code.
3027 * XXX inefficient
3028 */
3029 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3030 if (error) {
3031 m_freem(*mp);
3032 *mp = NULL;
3033 return error;
3034 }
3035
3036 /*
3037 * If the packet is out-bound, we can't delay checksums
3038 * here. For in-bound, the checksum has already been
3039 * validated.
3040 */
3041 if (dir == PFIL_OUT) {
3042 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3043 in6_delayed_cksum(*mp);
3044 (*mp)->m_pkthdr.csum_flags &=
3045 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3046 }
3047 }
3048
3049 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3050 != PF_PASS) {
3051 m_freem(*mp);
3052 *mp = NULL;
3053 return EHOSTUNREACH;
3054 } else
3055 return (0);
3056 }
3057 #endif
3058
3059 int
3060 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3061 {
3062 u_long cmd = (u_long)mp;
3063
3064 switch (cmd) {
3065 case PFIL_IFNET_ATTACH:
3066 pfi_attach_ifnet(ifp);
3067 break;
3068 case PFIL_IFNET_DETACH:
3069 pfi_detach_ifnet(ifp);
3070 break;
3071 }
3072
3073 return (0);
3074 }
3075
3076 int
3077 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3078 {
3079 extern void pfi_kifaddr_update_if(struct ifnet *);
3080
3081 u_long cmd = (u_long)mp;
3082
3083 switch (cmd) {
3084 case SIOCSIFADDR:
3085 case SIOCAIFADDR:
3086 case SIOCDIFADDR:
3087 #ifdef INET6
3088 case SIOCAIFADDR_IN6:
3089 case SIOCDIFADDR_IN6:
3090 #endif
3091 pfi_kifaddr_update_if(ifp);
3092 break;
3093 default:
3094 panic("unexpected ioctl");
3095 }
3096
3097 return (0);
3098 }
3099
3100 static int
3101 pf_pfil_attach(void)
3102 {
3103 struct pfil_head *ph_inet;
3104 #ifdef INET6
3105 struct pfil_head *ph_inet6;
3106 #endif
3107 int error;
3108 int i;
3109
3110 if (pf_pfil_attached)
3111 return (0);
3112
3113 error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3114 if (error)
3115 goto bad1;
3116 error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3117 if (error)
3118 goto bad2;
3119
3120 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3121 if (ph_inet)
3122 error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3123 PFIL_IN|PFIL_OUT, ph_inet);
3124 else
3125 error = ENOENT;
3126 if (error)
3127 goto bad3;
3128
3129 #ifdef INET6
3130 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3131 if (ph_inet6)
3132 error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3133 PFIL_IN|PFIL_OUT, ph_inet6);
3134 else
3135 error = ENOENT;
3136 if (error)
3137 goto bad4;
3138 #endif
3139
3140 for (i = 0; i < if_indexlim; i++)
3141 if (ifindex2ifnet[i])
3142 pfi_attach_ifnet(ifindex2ifnet[i]);
3143 pf_pfil_attached = 1;
3144
3145 return (0);
3146
3147 #ifdef INET6
3148 bad4:
3149 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3150 #endif
3151 bad3:
3152 pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3153 bad2:
3154 pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3155 bad1:
3156 return (error);
3157 }
3158
3159 static int
3160 pf_pfil_detach(void)
3161 {
3162 struct pfil_head *ph_inet;
3163 #ifdef INET6
3164 struct pfil_head *ph_inet6;
3165 #endif
3166 int i;
3167
3168 if (pf_pfil_attached == 0)
3169 return (0);
3170
3171 for (i = 0; i < if_indexlim; i++)
3172 if (pfi_index2kif[i])
3173 pfi_detach_ifnet(ifindex2ifnet[i]);
3174
3175 pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3176 pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3177
3178 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3179 if (ph_inet)
3180 pfil_remove_hook((void *)pfil4_wrapper, NULL,
3181 PFIL_IN|PFIL_OUT, ph_inet);
3182 #ifdef INET6
3183 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3184 if (ph_inet6)
3185 pfil_remove_hook((void *)pfil6_wrapper, NULL,
3186 PFIL_IN|PFIL_OUT, ph_inet6);
3187 #endif
3188 pf_pfil_attached = 0;
3189
3190 return (0);
3191 }
3192 #endif
3193