pf_ioctl.c revision 1.24 1 /* $NetBSD: pf_ioctl.c,v 1.24 2006/09/19 21:42:29 elad Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define NPFSYNC 0
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #include <sys/lwp.h>
70 #include <sys/kauth.h>
71 #endif
72
73 #include <net/if.h>
74 #include <net/if_types.h>
75 #include <net/route.h>
76
77 #include <netinet/in.h>
78 #include <netinet/in_var.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/ip.h>
81 #include <netinet/ip_var.h>
82 #include <netinet/ip_icmp.h>
83
84 #ifdef __OpenBSD__
85 #include <dev/rndvar.h>
86 #endif
87 #include <net/pfvar.h>
88
89 #if NPFSYNC > 0
90 #include <net/if_pfsync.h>
91 #endif /* NPFSYNC > 0 */
92
93 #ifdef INET6
94 #include <netinet/ip6.h>
95 #include <netinet/in_pcb.h>
96 #endif /* INET6 */
97
98 #ifdef ALTQ
99 #include <altq/altq.h>
100 #endif
101
/*
 * Forward declarations for the /dev/pf entry points and the internal
 * ruleset / anchor / pool / tag helpers defined in this file.
 */
void			 pfattach(int);
#ifdef _LKM
void			 pfdetach(void);
#endif
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);
int			 pf_get_ruleset_number(u_int8_t);
void			 pf_init_ruleset(struct pf_ruleset *);
int			 pf_anchor_setup(struct pf_rule *,
			    const struct pf_ruleset *, const char *);
int			 pf_anchor_copyout(const struct pf_ruleset *,
			    const struct pf_rule *, struct pfioc_rule *);
void			 pf_anchor_remove(struct pf_rule *);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_commit_rules(u_int32_t, int, char *);

#ifdef __NetBSD__
/* Character device switch for /dev/pf: only open/close/ioctl are useful. */
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

/* Nonzero while the pfil(9) input/output hooks are installed. */
static int pf_pfil_attached = 0;
#endif

/* Periodic state/src-node purge timer (armed in pfattach()). */
#ifdef __OpenBSD__
extern struct timeout	 pf_expire_to;
#else
extern struct callout	 pf_expire_to;
#endif

/* Implicit catch-all rule; never garbage collected (see pfattach()). */
struct pf_rule		 pf_default_rule;
#ifdef ALTQ
/* Nonzero while ALTQ disciplines are enabled (DIOCSTARTALTQ). */
static int		 pf_altq_running;
#endif

/* Upper bound on tag/queue ids handed out by tagname2tag(). */
#define	TAGID_MAX	 50000
/* Name<->id registries for rule tags and ALTQ queue names. */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf, gated on the globally configured pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
extern struct pfil_head if_pfil;
#endif
174
/*
 * One-time subsystem initialization, called at attach time.
 * Sets up memory pools, global trees/queues, the default rule and
 * its timeouts, and arms the periodic purge timer.  The "num"
 * argument (number of devices) is unused.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* Memory pools for the core pf objects. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();	/* radix tables */
	pfi_initialize();	/* interface abstraction */
	pf_osfp_initialize();	/* passive OS fingerprinting */

	/* Enforce the configured maximum number of states. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* Arm the periodic expiry timer (fires every PFTM_INTERVAL secs). */
#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
248
249 #ifdef _LKM
/*
 * Tear down the whole pf subsystem (LKM unload path).  The order is
 * significant: hooks are removed and the purge timer stopped first,
 * then rules, states, source nodes, tables and anchors are flushed,
 * and only then are the backing pools and subsystems destroyed.
 */
void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';	/* empty anchor path == main ruleset */

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_status.running = 0;

	/* clear the rulesets: commit an empty inactive set over each */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif

	/* clear states: mark everything for immediate purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		/* don't advertise the removals to pfsync peers */
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: drop back-references, then expire them all */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: flushing their rulesets removes empty anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
325 #endif
326
327 int
328 pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
329 {
330 if (minor(dev) >= 1)
331 return (ENXIO);
332 return (0);
333 }
334
335 int
336 pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
337 {
338 if (minor(dev) >= 1)
339 return (ENXIO);
340 return (0);
341 }
342
/*
 * Locate the address pool of one rule inside the given anchor.
 * The rule is selected by action (which picks the ruleset), by
 * active/inactive list, and by either rule number or "last rule"
 * (r_last).  When check_ticket is set, the caller's ticket must
 * match the list's current ticket, otherwise NULL is returned.
 * Returns NULL if anchor, ruleset or rule cannot be found.
 */
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* walk forward to the rule with the requested number */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
386
387 int
388 pf_get_ruleset_number(u_int8_t action)
389 {
390 switch (action) {
391 case PF_SCRUB:
392 case PF_NOSCRUB:
393 return (PF_RULESET_SCRUB);
394 break;
395 case PF_PASS:
396 case PF_DROP:
397 return (PF_RULESET_FILTER);
398 break;
399 case PF_NAT:
400 case PF_NONAT:
401 return (PF_RULESET_NAT);
402 break;
403 case PF_BINAT:
404 case PF_NOBINAT:
405 return (PF_RULESET_BINAT);
406 break;
407 case PF_RDR:
408 case PF_NORDR:
409 return (PF_RULESET_RDR);
410 break;
411 default:
412 return (PF_RULESET_MAX);
413 break;
414 }
415 }
416
/*
 * Zero a ruleset and wire up its per-type rule queues: queue 0 is
 * the initially active list, queue 1 the inactive (staging) list.
 */
void
pf_init_ruleset(struct pf_ruleset *ruleset)
{
	int	i;

	memset(ruleset, 0, sizeof(struct pf_ruleset));
	for (i = 0; i < PF_RULESET_MAX; i++) {
		TAILQ_INIT(&ruleset->rules[i].queues[0]);
		TAILQ_INIT(&ruleset->rules[i].queues[1]);
		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
	}
}
430
/*
 * Look up an anchor by its full path in the global anchor tree.
 * NOTE(review): the lookup key is static, presumably to keep the
 * large pf_anchor object off the kernel stack — this makes the
 * function non-reentrant; confirm callers are serialized.
 */
struct pf_anchor *
pf_find_anchor(const char *path)
{
	static struct pf_anchor	 key;

	memset(&key, 0, sizeof(key));
	strlcpy(key.path, path, sizeof(key.path));
	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
}
440
441 struct pf_ruleset *
442 pf_find_ruleset(const char *path)
443 {
444 struct pf_anchor *anchor;
445
446 while (*path == '/')
447 path++;
448 if (!*path)
449 return (&pf_main_ruleset);
450 anchor = pf_find_anchor(path);
451 if (anchor == NULL)
452 return (NULL);
453 else
454 return (&anchor->ruleset);
455 }
456
/*
 * Resolve an anchor path to a ruleset, creating any missing anchors
 * along the way.  First finds the deepest existing ancestor, then
 * creates one anchor per remaining path component, inserting each
 * into the global tree and into its parent's children tree.
 * Returns NULL on bad paths, allocation failure, or tree collision.
 * NOTE(review): the path buffer `p` is static — non-reentrant.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL /* XXX gcc */, *r;
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL /* XXX gcc */,
				*dup, *parent = NULL;

	while (*path == '/')
		path++;
	/* fast path: the whole ruleset already exists */
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	strlcpy(p, path, sizeof(p));
	/* strip components off the tail until an existing ancestor is found */
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now points at the first component that must be created */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* create one anchor per remaining component */
	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL)
			*r = 0;
		/* reject empty/oversized names and over-long total paths */
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				/* undo the global insert before bailing */
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;
		/* advance past the component just consumed */
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;
	}
	return (&anchor->ruleset);
}
538
/*
 * Garbage-collect an anchor's ruleset when it is completely unused
 * (no children, no references, no tables, no rules, no open
 * transactions), then walk up and try the same on each parent.
 * The main ruleset is never removed.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* unlink from both trees, then free the anchor itself */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		/* parent may have just become empty — try it too */
		ruleset = &parent->ruleset;
	}
}
566
/*
 * Attach rule r to the anchor named by `name`, creating the anchor
 * if needed.  Absolute names (leading '/') are taken as-is; relative
 * names are resolved against ruleset s, with "../" components
 * walking up (anchor_relative counts the ups for copyout).  A
 * trailing "/*" sets anchor_wildcard.  Returns 0 on success,
 * 1 on error (bad path or anchor creation failure).
 * NOTE(review): `p` and `path` are static — non-reentrant.
 */
int
pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
    const char *name)
{
	static char		*p, path[MAXPATHLEN];
	struct pf_ruleset	*ruleset;

	r->anchor = NULL;
	r->anchor_relative = 0;
	r->anchor_wildcard = 0;
	if (!name[0])
		return (0);
	if (name[0] == '/')
		strlcpy(path, name + 1, sizeof(path));
	else {
		/* relative path */
		r->anchor_relative = 1;
		if (s->anchor == NULL || !s->anchor->path[0])
			path[0] = 0;
		else
			strlcpy(path, s->anchor->path, sizeof(path));
		/* consume leading "../" components, walking up the tree */
		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
			if (!path[0]) {
				printf("pf_anchor_setup: .. beyond root\n");
				return (1);
			}
			if ((p = strrchr(path, '/')) != NULL)
				*p = 0;
			else
				path[0] = 0;
			r->anchor_relative++;
			name += 3;
		}
		if (path[0])
			strlcat(path, "/", sizeof(path));
		strlcat(path, name, sizeof(path));
	}
	/* a trailing "/*" means "evaluate all children" */
	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
		r->anchor_wildcard = 1;
		*p = 0;
	}
	ruleset = pf_find_or_create_ruleset(path);
	if (ruleset == NULL || ruleset->anchor == NULL) {
		printf("pf_anchor_setup: ruleset\n");
		return (1);
	}
	r->anchor = ruleset->anchor;
	r->anchor->refcnt++;
	return (0);
}
617
/*
 * Reconstruct the user-visible anchor call string for rule r as it
 * was given relative to ruleset rs (inverse of pf_anchor_setup()):
 * absolute anchors get a leading '/', relative ones get one "../"
 * per recorded level, and wildcard anchors get a trailing "*".
 * Returns 0 on success, 1 if the recorded paths are inconsistent.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		/* absolute reference: '/' + full path */
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char	 a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int	 i;

		/* a = caller's path, b = target path */
		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* emit one "../" per level walked up at setup time */
		for (i = 1; i < r->anchor_relative; ++i) {
			if ((p = strrchr(a, '/')) == NULL)
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* after stripping, a must be a prefix of b */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
658
659 void
660 pf_anchor_remove(struct pf_rule *r)
661 {
662 if (r->anchor == NULL)
663 return;
664 if (r->anchor->refcnt <= 0) {
665 printf("pf_anchor_remove: broken refcount");
666 r->anchor = NULL;
667 return;
668 }
669 if (!--r->anchor->refcnt)
670 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
671 r->anchor = NULL;
672 }
673
674 void
675 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
676 {
677 struct pf_pooladdr *mv_pool_pa;
678
679 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
680 TAILQ_REMOVE(poola, mv_pool_pa, entries);
681 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
682 }
683 }
684
685 void
686 pf_empty_pool(struct pf_palist *poola)
687 {
688 struct pf_pooladdr *empty_pool_pa;
689
690 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
691 pfi_dynaddr_remove(&empty_pool_pa->addr);
692 pf_tbladdr_remove(&empty_pool_pa->addr);
693 pfi_detach_rule(empty_pool_pa->kif);
694 TAILQ_REMOVE(poola, empty_pool_pa, entries);
695 pool_put(&pf_pooladdr_pl, empty_pool_pa);
696 }
697 }
698
/*
 * Unlink a rule from its queue (when rulequeue != NULL) and free it
 * once nothing references it anymore.  A rule still referenced by
 * states or source nodes is only unlinked; the final free happens
 * later when the last reference drops (rulequeue == NULL case).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced, or not yet unlinked — defer the free */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* table detach was skipped at unlink time — do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_detach_rule(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
744
/*
 * Return the id for tagname, allocating a new entry if the name is
 * unknown.  Existing names just gain a reference.  The list is kept
 * sorted by id so the smallest unused id can be found by a linear
 * scan.  Returns 0 on failure (id space exhausted or out of memory).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* scan consecutive ids; stop at the first gap (p marks it) */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
789
790 static void
791 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
792 {
793 struct pf_tagname *tag;
794
795 TAILQ_FOREACH(tag, head, entries)
796 if (tag->tag == tagid) {
797 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
798 return;
799 }
800 }
801
802 static void
803 tag_unref(struct pf_tags *head, u_int16_t tag)
804 {
805 struct pf_tagname *p, *next;
806
807 if (tag == 0)
808 return;
809
810 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
811 next = TAILQ_NEXT(p, entries);
812 if (tag == p->tag) {
813 if (--p->ref == 0) {
814 TAILQ_REMOVE(head, p, entries);
815 free(p, M_TEMP);
816 }
817 break;
818 }
819 }
820 }
821
/* Public wrapper: allocate/reference a rule tag id by name. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
827
828 void
829 pf_tag2tagname(u_int16_t tagid, char *p)
830 {
831 return (tag2tagname(&pf_tags, tagid, p));
832 }
833
834 void
835 pf_tag_ref(u_int16_t tag)
836 {
837 struct pf_tagname *t;
838
839 TAILQ_FOREACH(t, &pf_tags, entries)
840 if (t->tag == tag)
841 break;
842 if (t != NULL)
843 t->ref++;
844 }
845
846 void
847 pf_tag_unref(u_int16_t tag)
848 {
849 return (tag_unref(&pf_tags, tag));
850 }
851
/*
 * Resolve a route-label name to its id and store it in the address
 * wrapper.  Returns -1 when the label cannot be resolved.  Route
 * labels are an OpenBSD-only feature; elsewhere this is a no-op
 * that always succeeds.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif
	return (0);
}
862
/*
 * Release the route-label reference held by an address wrapper
 * (OpenBSD only; no-op elsewhere).
 */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}
871
/*
 * Translate a stored route-label id back to its name for copyout
 * to userland; "?" is substituted when the id no longer resolves
 * (OpenBSD only; no-op elsewhere).
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}
888
889 #ifdef ALTQ
/*
 * Allocate/reference a queue id for qname.  Queue ids share the
 * 16-bit tag allocator (PF_QNAME_SIZE == PF_TAG_NAME_SIZE is
 * asserted above), widened to u_int32_t for the ALTQ API.
 */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
895
896 void
897 pf_qid2qname(u_int32_t qid, char *p)
898 {
899 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
900 }
901
902 void
903 pf_qid_unref(u_int32_t qid)
904 {
905 return (tag_unref(&pf_qids, (u_int16_t)qid));
906 }
907
/*
 * Start an ALTQ transaction: flush whatever is left on the inactive
 * list and hand the caller a fresh ticket.  Only root-level queues
 * (empty qname) own a discipline that must be torn down.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
930
/*
 * Abort an ALTQ transaction: discard the staged inactive list.
 * Silently succeeds when the ticket does not match the open
 * transaction (nothing to roll back).
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
952
/*
 * Commit an ALTQ transaction: atomically (under splsoftnet) swap
 * the staged list in as the active one, attach/enable the new
 * disciplines, then tear down and free the previously active list.
 * Returns EBUSY on a ticket mismatch.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			/* remember the first error, keep tearing down */
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1006
1007 int
1008 pf_enable_altq(struct pf_altq *altq)
1009 {
1010 struct ifnet *ifp;
1011 struct tb_profile tb;
1012 int s, error = 0;
1013
1014 if ((ifp = ifunit(altq->ifname)) == NULL)
1015 return (EINVAL);
1016
1017 if (ifp->if_snd.altq_type != ALTQT_NONE)
1018 error = altq_enable(&ifp->if_snd);
1019
1020 /* set tokenbucket regulator */
1021 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1022 tb.rate = altq->ifbandwidth;
1023 tb.depth = altq->tbrsize;
1024 s = splimp();
1025 error = tbr_set(&ifp->if_snd, &tb);
1026 splx(s);
1027 }
1028
1029 return (error);
1030 }
1031
/*
 * Disable the ALTQ discipline on the queue's interface and clear
 * its token-bucket regulator (rate 0; tb.depth is not set here —
 * presumably ignored by tbr_set() for rate 0, TODO confirm).
 * Returns EINVAL for an unknown interface, 0 when the discipline
 * was already replaced by a newer one.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splimp();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1061 #endif /* ALTQ */
1062
/*
 * Start a ruleset transaction: empty the inactive rule list of the
 * given anchor/ruleset type and return a fresh ticket the caller
 * must present to commit or roll back.  Creates the anchor when
 * it does not exist yet.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
1080
/*
 * Abort a ruleset transaction: discard the staged inactive rules.
 * A stale/unknown ticket is not an error — there is simply nothing
 * to roll back.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
1098
/*
 * Commit a ruleset transaction: under splsoftnet, swap the staged
 * inactive list in as the active one, recompute skip steps, purge
 * the previously active rules and garbage-collect the ruleset if
 * it became empty.  Returns EBUSY on a ticket mismatch.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
1132
1133 int
1134 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1135 {
1136 struct pf_pooladdr *pa = NULL;
1137 struct pf_pool *pool = NULL;
1138 int s;
1139 int error = 0;
1140
1141 /* XXX keep in sync with switch() below */
1142 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1143 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1144 switch (cmd) {
1145 case DIOCGETRULES:
1146 case DIOCGETRULE:
1147 case DIOCGETADDRS:
1148 case DIOCGETADDR:
1149 case DIOCGETSTATE:
1150 case DIOCSETSTATUSIF:
1151 case DIOCGETSTATUS:
1152 case DIOCCLRSTATUS:
1153 case DIOCNATLOOK:
1154 case DIOCSETDEBUG:
1155 case DIOCGETSTATES:
1156 case DIOCGETTIMEOUT:
1157 case DIOCCLRRULECTRS:
1158 case DIOCGETLIMIT:
1159 case DIOCGETALTQS:
1160 case DIOCGETALTQ:
1161 case DIOCGETQSTATS:
1162 case DIOCGETRULESETS:
1163 case DIOCGETRULESET:
1164 case DIOCRGETTABLES:
1165 case DIOCRGETTSTATS:
1166 case DIOCRCLRTSTATS:
1167 case DIOCRCLRADDRS:
1168 case DIOCRADDADDRS:
1169 case DIOCRDELADDRS:
1170 case DIOCRSETADDRS:
1171 case DIOCRGETADDRS:
1172 case DIOCRGETASTATS:
1173 case DIOCRCLRASTATS:
1174 case DIOCRTSTADDRS:
1175 case DIOCOSFPGET:
1176 case DIOCGETSRCNODES:
1177 case DIOCCLRSRCNODES:
1178 case DIOCIGETIFACES:
1179 case DIOCICLRISTATS:
1180 case DIOCSETIFFLAG:
1181 case DIOCCLRIFFLAG:
1182 break;
1183 case DIOCRCLRTABLES:
1184 case DIOCRADDTABLES:
1185 case DIOCRDELTABLES:
1186 case DIOCRSETTFLAGS:
1187 if (((struct pfioc_table *)addr)->pfrio_flags &
1188 PFR_FLAG_DUMMY)
1189 break; /* dummy operation ok */
1190 return (EPERM);
1191 default:
1192 return (EPERM);
1193 }
1194
1195 if (!(flags & FWRITE))
1196 switch (cmd) {
1197 case DIOCGETRULES:
1198 case DIOCGETRULE:
1199 case DIOCGETADDRS:
1200 case DIOCGETADDR:
1201 case DIOCGETSTATE:
1202 case DIOCGETSTATUS:
1203 case DIOCGETSTATES:
1204 case DIOCGETTIMEOUT:
1205 case DIOCGETLIMIT:
1206 case DIOCGETALTQS:
1207 case DIOCGETALTQ:
1208 case DIOCGETQSTATS:
1209 case DIOCGETRULESETS:
1210 case DIOCGETRULESET:
1211 case DIOCRGETTABLES:
1212 case DIOCRGETTSTATS:
1213 case DIOCRGETADDRS:
1214 case DIOCRGETASTATS:
1215 case DIOCRTSTADDRS:
1216 case DIOCOSFPGET:
1217 case DIOCGETSRCNODES:
1218 case DIOCIGETIFACES:
1219 break;
1220 case DIOCRCLRTABLES:
1221 case DIOCRADDTABLES:
1222 case DIOCRDELTABLES:
1223 case DIOCRCLRTSTATS:
1224 case DIOCRCLRADDRS:
1225 case DIOCRADDADDRS:
1226 case DIOCRDELADDRS:
1227 case DIOCRSETADDRS:
1228 case DIOCRSETTFLAGS:
1229 if (((struct pfioc_table *)addr)->pfrio_flags &
1230 PFR_FLAG_DUMMY)
1231 break; /* dummy operation ok */
1232 return (EACCES);
1233 default:
1234 return (EACCES);
1235 }
1236
1237 s = splsoftnet();
1238 switch (cmd) {
1239
1240 case DIOCSTART:
1241 if (pf_status.running)
1242 error = EEXIST;
1243 else {
1244 #ifdef __NetBSD__
1245 error = pf_pfil_attach();
1246 if (error)
1247 break;
1248 #endif
1249 pf_status.running = 1;
1250 pf_status.since = time_second;
1251 if (pf_status.stateid == 0) {
1252 pf_status.stateid = time_second;
1253 pf_status.stateid = pf_status.stateid << 32;
1254 }
1255 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1256 }
1257 break;
1258
1259 case DIOCSTOP:
1260 if (!pf_status.running)
1261 error = ENOENT;
1262 else {
1263 #ifdef __NetBSD__
1264 error = pf_pfil_detach();
1265 if (error)
1266 break;
1267 #endif
1268 pf_status.running = 0;
1269 pf_status.since = time_second;
1270 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1271 }
1272 break;
1273
1274 case DIOCADDRULE: {
1275 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1276 struct pf_ruleset *ruleset;
1277 struct pf_rule *rule, *tail;
1278 struct pf_pooladdr *pa;
1279 int rs_num;
1280
1281 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1282 ruleset = pf_find_ruleset(pr->anchor);
1283 if (ruleset == NULL) {
1284 error = EINVAL;
1285 break;
1286 }
1287 rs_num = pf_get_ruleset_number(pr->rule.action);
1288 if (rs_num >= PF_RULESET_MAX) {
1289 error = EINVAL;
1290 break;
1291 }
1292 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1293 error = EINVAL;
1294 break;
1295 }
1296 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1297 error = EBUSY;
1298 break;
1299 }
1300 if (pr->pool_ticket != ticket_pabuf) {
1301 error = EBUSY;
1302 break;
1303 }
1304 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1305 if (rule == NULL) {
1306 error = ENOMEM;
1307 break;
1308 }
1309 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1310 rule->anchor = NULL;
1311 rule->kif = NULL;
1312 TAILQ_INIT(&rule->rpool.list);
1313 /* initialize refcounting */
1314 rule->states = 0;
1315 rule->src_nodes = 0;
1316 rule->entries.tqe_prev = NULL;
1317 #ifndef INET
1318 if (rule->af == AF_INET) {
1319 pool_put(&pf_rule_pl, rule);
1320 error = EAFNOSUPPORT;
1321 break;
1322 }
1323 #endif /* INET */
1324 #ifndef INET6
1325 if (rule->af == AF_INET6) {
1326 pool_put(&pf_rule_pl, rule);
1327 error = EAFNOSUPPORT;
1328 break;
1329 }
1330 #endif /* INET6 */
1331 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1332 pf_rulequeue);
1333 if (tail)
1334 rule->nr = tail->nr + 1;
1335 else
1336 rule->nr = 0;
1337 if (rule->ifname[0]) {
1338 rule->kif = pfi_attach_rule(rule->ifname);
1339 if (rule->kif == NULL) {
1340 pool_put(&pf_rule_pl, rule);
1341 error = EINVAL;
1342 break;
1343 }
1344 }
1345
1346 #ifdef ALTQ
1347 /* set queue IDs */
1348 if (rule->qname[0] != 0) {
1349 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1350 error = EBUSY;
1351 else if (rule->pqname[0] != 0) {
1352 if ((rule->pqid =
1353 pf_qname2qid(rule->pqname)) == 0)
1354 error = EBUSY;
1355 } else
1356 rule->pqid = rule->qid;
1357 }
1358 #endif
1359 if (rule->tagname[0])
1360 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1361 error = EBUSY;
1362 if (rule->match_tagname[0])
1363 if ((rule->match_tag =
1364 pf_tagname2tag(rule->match_tagname)) == 0)
1365 error = EBUSY;
1366 if (rule->rt && !rule->direction)
1367 error = EINVAL;
1368 if (pf_rtlabel_add(&rule->src.addr) ||
1369 pf_rtlabel_add(&rule->dst.addr))
1370 error = EBUSY;
1371 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1372 error = EINVAL;
1373 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1374 error = EINVAL;
1375 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1376 error = EINVAL;
1377 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1378 error = EINVAL;
1379 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1380 error = EINVAL;
1381 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1382 if (pf_tbladdr_setup(ruleset, &pa->addr))
1383 error = EINVAL;
1384
1385 if (rule->overload_tblname[0]) {
1386 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1387 rule->overload_tblname)) == NULL)
1388 error = EINVAL;
1389 else
1390 rule->overload_tbl->pfrkt_flags |=
1391 PFR_TFLAG_ACTIVE;
1392 }
1393
1394 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1395 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1396 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1397 (rule->rt > PF_FASTROUTE)) &&
1398 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1399 error = EINVAL;
1400
1401 if (error) {
1402 pf_rm_rule(NULL, rule);
1403 break;
1404 }
1405 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1406 rule->evaluations = rule->packets = rule->bytes = 0;
1407 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1408 rule, entries);
1409 break;
1410 }
1411
1412 case DIOCGETRULES: {
1413 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1414 struct pf_ruleset *ruleset;
1415 struct pf_rule *tail;
1416 int rs_num;
1417
1418 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1419 ruleset = pf_find_ruleset(pr->anchor);
1420 if (ruleset == NULL) {
1421 error = EINVAL;
1422 break;
1423 }
1424 rs_num = pf_get_ruleset_number(pr->rule.action);
1425 if (rs_num >= PF_RULESET_MAX) {
1426 error = EINVAL;
1427 break;
1428 }
1429 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1430 pf_rulequeue);
1431 if (tail)
1432 pr->nr = tail->nr + 1;
1433 else
1434 pr->nr = 0;
1435 pr->ticket = ruleset->rules[rs_num].active.ticket;
1436 break;
1437 }
1438
1439 case DIOCGETRULE: {
1440 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1441 struct pf_ruleset *ruleset;
1442 struct pf_rule *rule;
1443 int rs_num, i;
1444
1445 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1446 ruleset = pf_find_ruleset(pr->anchor);
1447 if (ruleset == NULL) {
1448 error = EINVAL;
1449 break;
1450 }
1451 rs_num = pf_get_ruleset_number(pr->rule.action);
1452 if (rs_num >= PF_RULESET_MAX) {
1453 error = EINVAL;
1454 break;
1455 }
1456 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1457 error = EBUSY;
1458 break;
1459 }
1460 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1461 while ((rule != NULL) && (rule->nr != pr->nr))
1462 rule = TAILQ_NEXT(rule, entries);
1463 if (rule == NULL) {
1464 error = EBUSY;
1465 break;
1466 }
1467 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1468 if (pf_anchor_copyout(ruleset, rule, pr)) {
1469 error = EBUSY;
1470 break;
1471 }
1472 pfi_dynaddr_copyout(&pr->rule.src.addr);
1473 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1474 pf_tbladdr_copyout(&pr->rule.src.addr);
1475 pf_tbladdr_copyout(&pr->rule.dst.addr);
1476 pf_rtlabel_copyout(&pr->rule.src.addr);
1477 pf_rtlabel_copyout(&pr->rule.dst.addr);
1478 for (i = 0; i < PF_SKIP_COUNT; ++i)
1479 if (rule->skip[i].ptr == NULL)
1480 pr->rule.skip[i].nr = -1;
1481 else
1482 pr->rule.skip[i].nr =
1483 rule->skip[i].ptr->nr;
1484 break;
1485 }
1486
1487 case DIOCCHANGERULE: {
1488 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1489 struct pf_ruleset *ruleset;
1490 struct pf_rule *oldrule = NULL, *newrule = NULL;
1491 u_int32_t nr = 0;
1492 int rs_num;
1493
1494 if (!(pcr->action == PF_CHANGE_REMOVE ||
1495 pcr->action == PF_CHANGE_GET_TICKET) &&
1496 pcr->pool_ticket != ticket_pabuf) {
1497 error = EBUSY;
1498 break;
1499 }
1500
1501 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1502 pcr->action > PF_CHANGE_GET_TICKET) {
1503 error = EINVAL;
1504 break;
1505 }
1506 ruleset = pf_find_ruleset(pcr->anchor);
1507 if (ruleset == NULL) {
1508 error = EINVAL;
1509 break;
1510 }
1511 rs_num = pf_get_ruleset_number(pcr->rule.action);
1512 if (rs_num >= PF_RULESET_MAX) {
1513 error = EINVAL;
1514 break;
1515 }
1516
1517 if (pcr->action == PF_CHANGE_GET_TICKET) {
1518 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1519 break;
1520 } else {
1521 if (pcr->ticket !=
1522 ruleset->rules[rs_num].active.ticket) {
1523 error = EINVAL;
1524 break;
1525 }
1526 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1527 error = EINVAL;
1528 break;
1529 }
1530 }
1531
1532 if (pcr->action != PF_CHANGE_REMOVE) {
1533 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1534 if (newrule == NULL) {
1535 error = ENOMEM;
1536 break;
1537 }
1538 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1539 TAILQ_INIT(&newrule->rpool.list);
1540 /* initialize refcounting */
1541 newrule->states = 0;
1542 newrule->entries.tqe_prev = NULL;
1543 #ifndef INET
1544 if (newrule->af == AF_INET) {
1545 pool_put(&pf_rule_pl, newrule);
1546 error = EAFNOSUPPORT;
1547 break;
1548 }
1549 #endif /* INET */
1550 #ifndef INET6
1551 if (newrule->af == AF_INET6) {
1552 pool_put(&pf_rule_pl, newrule);
1553 error = EAFNOSUPPORT;
1554 break;
1555 }
1556 #endif /* INET6 */
1557 if (newrule->ifname[0]) {
1558 newrule->kif = pfi_attach_rule(newrule->ifname);
1559 if (newrule->kif == NULL) {
1560 pool_put(&pf_rule_pl, newrule);
1561 error = EINVAL;
1562 break;
1563 }
1564 } else
1565 newrule->kif = NULL;
1566
1567 #ifdef ALTQ
1568 /* set queue IDs */
1569 if (newrule->qname[0] != 0) {
1570 if ((newrule->qid =
1571 pf_qname2qid(newrule->qname)) == 0)
1572 error = EBUSY;
1573 else if (newrule->pqname[0] != 0) {
1574 if ((newrule->pqid =
1575 pf_qname2qid(newrule->pqname)) == 0)
1576 error = EBUSY;
1577 } else
1578 newrule->pqid = newrule->qid;
1579 }
1580 #endif /* ALTQ */
1581 if (newrule->tagname[0])
1582 if ((newrule->tag =
1583 pf_tagname2tag(newrule->tagname)) == 0)
1584 error = EBUSY;
1585 if (newrule->match_tagname[0])
1586 if ((newrule->match_tag = pf_tagname2tag(
1587 newrule->match_tagname)) == 0)
1588 error = EBUSY;
1589 if (newrule->rt && !newrule->direction)
1590 error = EINVAL;
1591 if (pf_rtlabel_add(&newrule->src.addr) ||
1592 pf_rtlabel_add(&newrule->dst.addr))
1593 error = EBUSY;
1594 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1595 error = EINVAL;
1596 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1597 error = EINVAL;
1598 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1599 error = EINVAL;
1600 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1601 error = EINVAL;
1602 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1603 error = EINVAL;
1604
1605 if (newrule->overload_tblname[0]) {
1606 if ((newrule->overload_tbl = pfr_attach_table(
1607 ruleset, newrule->overload_tblname)) ==
1608 NULL)
1609 error = EINVAL;
1610 else
1611 newrule->overload_tbl->pfrkt_flags |=
1612 PFR_TFLAG_ACTIVE;
1613 }
1614
1615 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1616 if (((((newrule->action == PF_NAT) ||
1617 (newrule->action == PF_RDR) ||
1618 (newrule->action == PF_BINAT) ||
1619 (newrule->rt > PF_FASTROUTE)) &&
1620 !pcr->anchor[0])) &&
1621 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1622 error = EINVAL;
1623
1624 if (error) {
1625 pf_rm_rule(NULL, newrule);
1626 break;
1627 }
1628 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1629 newrule->evaluations = newrule->packets = 0;
1630 newrule->bytes = 0;
1631 }
1632 pf_empty_pool(&pf_pabuf);
1633
1634 if (pcr->action == PF_CHANGE_ADD_HEAD)
1635 oldrule = TAILQ_FIRST(
1636 ruleset->rules[rs_num].active.ptr);
1637 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1638 oldrule = TAILQ_LAST(
1639 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1640 else {
1641 oldrule = TAILQ_FIRST(
1642 ruleset->rules[rs_num].active.ptr);
1643 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1644 oldrule = TAILQ_NEXT(oldrule, entries);
1645 if (oldrule == NULL) {
1646 if (newrule != NULL)
1647 pf_rm_rule(NULL, newrule);
1648 error = EINVAL;
1649 break;
1650 }
1651 }
1652
1653 if (pcr->action == PF_CHANGE_REMOVE)
1654 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1655 else {
1656 if (oldrule == NULL)
1657 TAILQ_INSERT_TAIL(
1658 ruleset->rules[rs_num].active.ptr,
1659 newrule, entries);
1660 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1661 pcr->action == PF_CHANGE_ADD_BEFORE)
1662 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1663 else
1664 TAILQ_INSERT_AFTER(
1665 ruleset->rules[rs_num].active.ptr,
1666 oldrule, newrule, entries);
1667 }
1668
1669 nr = 0;
1670 TAILQ_FOREACH(oldrule,
1671 ruleset->rules[rs_num].active.ptr, entries)
1672 oldrule->nr = nr++;
1673
1674 ruleset->rules[rs_num].active.ticket++;
1675
1676 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1677 pf_remove_if_empty_ruleset(ruleset);
1678
1679 break;
1680 }
1681
1682 case DIOCCLRSTATES: {
1683 struct pf_state *state;
1684 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1685 int killed = 0;
1686
1687 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1688 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1689 state->u.s.kif->pfik_name)) {
1690 state->timeout = PFTM_PURGE;
1691 #if NPFSYNC
1692 /* don't send out individual delete messages */
1693 state->sync_flags = PFSTATE_NOSYNC;
1694 #endif
1695 killed++;
1696 }
1697 }
1698 pf_purge_expired_states();
1699 pf_status.states = 0;
1700 psk->psk_af = killed;
1701 #if NPFSYNC
1702 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1703 #endif
1704 break;
1705 }
1706
1707 case DIOCKILLSTATES: {
1708 struct pf_state *state;
1709 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1710 int killed = 0;
1711
1712 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1713 if ((!psk->psk_af || state->af == psk->psk_af)
1714 && (!psk->psk_proto || psk->psk_proto ==
1715 state->proto) &&
1716 PF_MATCHA(psk->psk_src.neg,
1717 &psk->psk_src.addr.v.a.addr,
1718 &psk->psk_src.addr.v.a.mask,
1719 &state->lan.addr, state->af) &&
1720 PF_MATCHA(psk->psk_dst.neg,
1721 &psk->psk_dst.addr.v.a.addr,
1722 &psk->psk_dst.addr.v.a.mask,
1723 &state->ext.addr, state->af) &&
1724 (psk->psk_src.port_op == 0 ||
1725 pf_match_port(psk->psk_src.port_op,
1726 psk->psk_src.port[0], psk->psk_src.port[1],
1727 state->lan.port)) &&
1728 (psk->psk_dst.port_op == 0 ||
1729 pf_match_port(psk->psk_dst.port_op,
1730 psk->psk_dst.port[0], psk->psk_dst.port[1],
1731 state->ext.port)) &&
1732 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1733 state->u.s.kif->pfik_name))) {
1734 state->timeout = PFTM_PURGE;
1735 killed++;
1736 }
1737 }
1738 pf_purge_expired_states();
1739 psk->psk_af = killed;
1740 break;
1741 }
1742
1743 case DIOCADDSTATE: {
1744 struct pfioc_state *ps = (struct pfioc_state *)addr;
1745 struct pf_state *state;
1746 struct pfi_kif *kif;
1747
1748 if (ps->state.timeout >= PFTM_MAX &&
1749 ps->state.timeout != PFTM_UNTIL_PACKET) {
1750 error = EINVAL;
1751 break;
1752 }
1753 state = pool_get(&pf_state_pl, PR_NOWAIT);
1754 if (state == NULL) {
1755 error = ENOMEM;
1756 break;
1757 }
1758 kif = pfi_lookup_create(ps->state.u.ifname);
1759 if (kif == NULL) {
1760 pool_put(&pf_state_pl, state);
1761 error = ENOENT;
1762 break;
1763 }
1764 bcopy(&ps->state, state, sizeof(struct pf_state));
1765 bzero(&state->u, sizeof(state->u));
1766 state->rule.ptr = &pf_default_rule;
1767 state->nat_rule.ptr = NULL;
1768 state->anchor.ptr = NULL;
1769 state->rt_kif = NULL;
1770 state->creation = time_second;
1771 state->pfsync_time = 0;
1772 state->packets[0] = state->packets[1] = 0;
1773 state->bytes[0] = state->bytes[1] = 0;
1774
1775 if (pf_insert_state(kif, state)) {
1776 pfi_maybe_destroy(kif);
1777 pool_put(&pf_state_pl, state);
1778 error = ENOMEM;
1779 }
1780 break;
1781 }
1782
1783 case DIOCGETSTATE: {
1784 struct pfioc_state *ps = (struct pfioc_state *)addr;
1785 struct pf_state *state;
1786 u_int32_t nr;
1787
1788 nr = 0;
1789 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1790 if (nr >= ps->nr)
1791 break;
1792 nr++;
1793 }
1794 if (state == NULL) {
1795 error = EBUSY;
1796 break;
1797 }
1798 bcopy(state, &ps->state, sizeof(struct pf_state));
1799 ps->state.rule.nr = state->rule.ptr->nr;
1800 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1801 -1 : state->nat_rule.ptr->nr;
1802 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1803 -1 : state->anchor.ptr->nr;
1804 ps->state.expire = pf_state_expires(state);
1805 if (ps->state.expire > time_second)
1806 ps->state.expire -= time_second;
1807 else
1808 ps->state.expire = 0;
1809 break;
1810 }
1811
1812 case DIOCGETSTATES: {
1813 struct pfioc_states *ps = (struct pfioc_states *)addr;
1814 struct pf_state *state;
1815 struct pf_state *p, pstore;
1816 struct pfi_kif *kif;
1817 u_int32_t nr = 0;
1818 int space = ps->ps_len;
1819
1820 if (space == 0) {
1821 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1822 nr += kif->pfik_states;
1823 ps->ps_len = sizeof(struct pf_state) * nr;
1824 break;
1825 }
1826
1827 p = ps->ps_states;
1828 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1829 RB_FOREACH(state, pf_state_tree_ext_gwy,
1830 &kif->pfik_ext_gwy) {
1831 int secs = time_second;
1832
1833 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1834 break;
1835
1836 bcopy(state, &pstore, sizeof(pstore));
1837 strlcpy(pstore.u.ifname, kif->pfik_name,
1838 sizeof(pstore.u.ifname));
1839 pstore.rule.nr = state->rule.ptr->nr;
1840 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1841 NULL) ? -1 : state->nat_rule.ptr->nr;
1842 pstore.anchor.nr = (state->anchor.ptr ==
1843 NULL) ? -1 : state->anchor.ptr->nr;
1844 pstore.creation = secs - pstore.creation;
1845 pstore.expire = pf_state_expires(state);
1846 if (pstore.expire > secs)
1847 pstore.expire -= secs;
1848 else
1849 pstore.expire = 0;
1850 error = copyout(&pstore, p, sizeof(*p));
1851 if (error)
1852 goto fail;
1853 p++;
1854 nr++;
1855 }
1856 ps->ps_len = sizeof(struct pf_state) * nr;
1857 break;
1858 }
1859
1860 case DIOCGETSTATUS: {
1861 struct pf_status *s = (struct pf_status *)addr;
1862 bcopy(&pf_status, s, sizeof(struct pf_status));
1863 pfi_fill_oldstatus(s);
1864 break;
1865 }
1866
1867 case DIOCSETSTATUSIF: {
1868 struct pfioc_if *pi = (struct pfioc_if *)addr;
1869
1870 if (pi->ifname[0] == 0) {
1871 bzero(pf_status.ifname, IFNAMSIZ);
1872 break;
1873 }
1874 if (ifunit(pi->ifname) == NULL) {
1875 error = EINVAL;
1876 break;
1877 }
1878 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1879 break;
1880 }
1881
1882 case DIOCCLRSTATUS: {
1883 bzero(pf_status.counters, sizeof(pf_status.counters));
1884 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1885 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1886 if (*pf_status.ifname)
1887 pfi_clr_istats(pf_status.ifname, NULL,
1888 PFI_FLAG_INSTANCE);
1889 break;
1890 }
1891
1892 case DIOCNATLOOK: {
1893 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1894 struct pf_state *state;
1895 struct pf_state key;
1896 int m = 0, direction = pnl->direction;
1897
1898 key.af = pnl->af;
1899 key.proto = pnl->proto;
1900
1901 if (!pnl->proto ||
1902 PF_AZERO(&pnl->saddr, pnl->af) ||
1903 PF_AZERO(&pnl->daddr, pnl->af) ||
1904 !pnl->dport || !pnl->sport)
1905 error = EINVAL;
1906 else {
1907 /*
1908 * userland gives us source and dest of connection,
1909 * reverse the lookup so we ask for what happens with
1910 * the return traffic, enabling us to find it in the
1911 * state tree.
1912 */
1913 if (direction == PF_IN) {
1914 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1915 key.ext.port = pnl->dport;
1916 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1917 key.gwy.port = pnl->sport;
1918 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1919 } else {
1920 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1921 key.lan.port = pnl->dport;
1922 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1923 key.ext.port = pnl->sport;
1924 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1925 }
1926 if (m > 1)
1927 error = E2BIG; /* more than one state */
1928 else if (state != NULL) {
1929 if (direction == PF_IN) {
1930 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1931 state->af);
1932 pnl->rsport = state->lan.port;
1933 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1934 pnl->af);
1935 pnl->rdport = pnl->dport;
1936 } else {
1937 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1938 state->af);
1939 pnl->rdport = state->gwy.port;
1940 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1941 pnl->af);
1942 pnl->rsport = pnl->sport;
1943 }
1944 } else
1945 error = ENOENT;
1946 }
1947 break;
1948 }
1949
1950 case DIOCSETTIMEOUT: {
1951 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1952 int old;
1953
1954 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1955 pt->seconds < 0) {
1956 error = EINVAL;
1957 goto fail;
1958 }
1959 old = pf_default_rule.timeout[pt->timeout];
1960 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1961 pt->seconds = old;
1962 break;
1963 }
1964
1965 case DIOCGETTIMEOUT: {
1966 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1967
1968 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1969 error = EINVAL;
1970 goto fail;
1971 }
1972 pt->seconds = pf_default_rule.timeout[pt->timeout];
1973 break;
1974 }
1975
1976 case DIOCGETLIMIT: {
1977 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1978
1979 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1980 error = EINVAL;
1981 goto fail;
1982 }
1983 pl->limit = pf_pool_limits[pl->index].limit;
1984 break;
1985 }
1986
1987 case DIOCSETLIMIT: {
1988 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1989 int old_limit;
1990
1991 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1992 pf_pool_limits[pl->index].pp == NULL) {
1993 error = EINVAL;
1994 goto fail;
1995 }
1996 #ifdef __OpenBSD__
1997 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1998 pl->limit, NULL, 0) != 0) {
1999 error = EBUSY;
2000 goto fail;
2001 }
2002 #else
2003 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2004 pl->limit, NULL, 0);
2005 #endif
2006 old_limit = pf_pool_limits[pl->index].limit;
2007 pf_pool_limits[pl->index].limit = pl->limit;
2008 pl->limit = old_limit;
2009 break;
2010 }
2011
2012 case DIOCSETDEBUG: {
2013 u_int32_t *level = (u_int32_t *)addr;
2014
2015 pf_status.debug = *level;
2016 break;
2017 }
2018
2019 case DIOCCLRRULECTRS: {
2020 struct pf_ruleset *ruleset = &pf_main_ruleset;
2021 struct pf_rule *rule;
2022
2023 TAILQ_FOREACH(rule,
2024 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2025 rule->evaluations = rule->packets =
2026 rule->bytes = 0;
2027 break;
2028 }
2029
2030 #ifdef ALTQ
2031 case DIOCSTARTALTQ: {
2032 struct pf_altq *altq;
2033
2034 /* enable all altq interfaces on active list */
2035 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2036 if (altq->qname[0] == 0) {
2037 error = pf_enable_altq(altq);
2038 if (error != 0)
2039 break;
2040 }
2041 }
2042 if (error == 0)
2043 pf_altq_running = 1;
2044 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2045 break;
2046 }
2047
2048 case DIOCSTOPALTQ: {
2049 struct pf_altq *altq;
2050
2051 /* disable all altq interfaces on active list */
2052 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2053 if (altq->qname[0] == 0) {
2054 error = pf_disable_altq(altq);
2055 if (error != 0)
2056 break;
2057 }
2058 }
2059 if (error == 0)
2060 pf_altq_running = 0;
2061 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2062 break;
2063 }
2064
2065 case DIOCADDALTQ: {
2066 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2067 struct pf_altq *altq, *a;
2068
2069 if (pa->ticket != ticket_altqs_inactive) {
2070 error = EBUSY;
2071 break;
2072 }
2073 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2074 if (altq == NULL) {
2075 error = ENOMEM;
2076 break;
2077 }
2078 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2079
2080 /*
2081 * if this is for a queue, find the discipline and
2082 * copy the necessary fields
2083 */
2084 if (altq->qname[0] != 0) {
2085 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2086 error = EBUSY;
2087 pool_put(&pf_altq_pl, altq);
2088 break;
2089 }
2090 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2091 if (strncmp(a->ifname, altq->ifname,
2092 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2093 altq->altq_disc = a->altq_disc;
2094 break;
2095 }
2096 }
2097 }
2098
2099 error = altq_add(altq);
2100 if (error) {
2101 pool_put(&pf_altq_pl, altq);
2102 break;
2103 }
2104
2105 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2106 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2107 break;
2108 }
2109
2110 case DIOCGETALTQS: {
2111 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2112 struct pf_altq *altq;
2113
2114 pa->nr = 0;
2115 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2116 pa->nr++;
2117 pa->ticket = ticket_altqs_active;
2118 break;
2119 }
2120
2121 case DIOCGETALTQ: {
2122 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2123 struct pf_altq *altq;
2124 u_int32_t nr;
2125
2126 if (pa->ticket != ticket_altqs_active) {
2127 error = EBUSY;
2128 break;
2129 }
2130 nr = 0;
2131 altq = TAILQ_FIRST(pf_altqs_active);
2132 while ((altq != NULL) && (nr < pa->nr)) {
2133 altq = TAILQ_NEXT(altq, entries);
2134 nr++;
2135 }
2136 if (altq == NULL) {
2137 error = EBUSY;
2138 break;
2139 }
2140 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2141 break;
2142 }
2143
2144 case DIOCCHANGEALTQ:
2145 /* CHANGEALTQ not supported yet! */
2146 error = ENODEV;
2147 break;
2148
2149 case DIOCGETQSTATS: {
2150 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2151 struct pf_altq *altq;
2152 u_int32_t nr;
2153 int nbytes;
2154
2155 if (pq->ticket != ticket_altqs_active) {
2156 error = EBUSY;
2157 break;
2158 }
2159 nbytes = pq->nbytes;
2160 nr = 0;
2161 altq = TAILQ_FIRST(pf_altqs_active);
2162 while ((altq != NULL) && (nr < pq->nr)) {
2163 altq = TAILQ_NEXT(altq, entries);
2164 nr++;
2165 }
2166 if (altq == NULL) {
2167 error = EBUSY;
2168 break;
2169 }
2170 error = altq_getqstats(altq, pq->buf, &nbytes);
2171 if (error == 0) {
2172 pq->scheduler = altq->scheduler;
2173 pq->nbytes = nbytes;
2174 }
2175 break;
2176 }
2177 #endif /* ALTQ */
2178
2179 case DIOCBEGINADDRS: {
2180 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2181
2182 pf_empty_pool(&pf_pabuf);
2183 pp->ticket = ++ticket_pabuf;
2184 break;
2185 }
2186
2187 case DIOCADDADDR: {
2188 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2189
2190 #ifndef INET
2191 if (pp->af == AF_INET) {
2192 error = EAFNOSUPPORT;
2193 break;
2194 }
2195 #endif /* INET */
2196 #ifndef INET6
2197 if (pp->af == AF_INET6) {
2198 error = EAFNOSUPPORT;
2199 break;
2200 }
2201 #endif /* INET6 */
2202 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2203 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2204 pp->addr.addr.type != PF_ADDR_TABLE) {
2205 error = EINVAL;
2206 break;
2207 }
2208 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2209 if (pa == NULL) {
2210 error = ENOMEM;
2211 break;
2212 }
2213 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2214 if (pa->ifname[0]) {
2215 pa->kif = pfi_attach_rule(pa->ifname);
2216 if (pa->kif == NULL) {
2217 pool_put(&pf_pooladdr_pl, pa);
2218 error = EINVAL;
2219 break;
2220 }
2221 }
2222 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2223 pfi_dynaddr_remove(&pa->addr);
2224 pfi_detach_rule(pa->kif);
2225 pool_put(&pf_pooladdr_pl, pa);
2226 error = EINVAL;
2227 break;
2228 }
2229 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2230 break;
2231 }
2232
2233 case DIOCGETADDRS: {
2234 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2235
2236 pp->nr = 0;
2237 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2238 pp->r_num, 0, 1, 0);
2239 if (pool == NULL) {
2240 error = EBUSY;
2241 break;
2242 }
2243 TAILQ_FOREACH(pa, &pool->list, entries)
2244 pp->nr++;
2245 break;
2246 }
2247
2248 case DIOCGETADDR: {
2249 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2250 u_int32_t nr = 0;
2251
2252 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2253 pp->r_num, 0, 1, 1);
2254 if (pool == NULL) {
2255 error = EBUSY;
2256 break;
2257 }
2258 pa = TAILQ_FIRST(&pool->list);
2259 while ((pa != NULL) && (nr < pp->nr)) {
2260 pa = TAILQ_NEXT(pa, entries);
2261 nr++;
2262 }
2263 if (pa == NULL) {
2264 error = EBUSY;
2265 break;
2266 }
2267 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2268 pfi_dynaddr_copyout(&pp->addr.addr);
2269 pf_tbladdr_copyout(&pp->addr.addr);
2270 pf_rtlabel_copyout(&pp->addr.addr);
2271 break;
2272 }
2273
2274 case DIOCCHANGEADDR: {
2275 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2276 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2277 struct pf_ruleset *ruleset;
2278
2279 if (pca->action < PF_CHANGE_ADD_HEAD ||
2280 pca->action > PF_CHANGE_REMOVE) {
2281 error = EINVAL;
2282 break;
2283 }
2284 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2285 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2286 pca->addr.addr.type != PF_ADDR_TABLE) {
2287 error = EINVAL;
2288 break;
2289 }
2290
2291 ruleset = pf_find_ruleset(pca->anchor);
2292 if (ruleset == NULL) {
2293 error = EBUSY;
2294 break;
2295 }
2296 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2297 pca->r_num, pca->r_last, 1, 1);
2298 if (pool == NULL) {
2299 error = EBUSY;
2300 break;
2301 }
2302 if (pca->action != PF_CHANGE_REMOVE) {
2303 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2304 if (newpa == NULL) {
2305 error = ENOMEM;
2306 break;
2307 }
2308 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2309 #ifndef INET
2310 if (pca->af == AF_INET) {
2311 pool_put(&pf_pooladdr_pl, newpa);
2312 error = EAFNOSUPPORT;
2313 break;
2314 }
2315 #endif /* INET */
2316 #ifndef INET6
2317 if (pca->af == AF_INET6) {
2318 pool_put(&pf_pooladdr_pl, newpa);
2319 error = EAFNOSUPPORT;
2320 break;
2321 }
2322 #endif /* INET6 */
2323 if (newpa->ifname[0]) {
2324 newpa->kif = pfi_attach_rule(newpa->ifname);
2325 if (newpa->kif == NULL) {
2326 pool_put(&pf_pooladdr_pl, newpa);
2327 error = EINVAL;
2328 break;
2329 }
2330 } else
2331 newpa->kif = NULL;
2332 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2333 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2334 pfi_dynaddr_remove(&newpa->addr);
2335 pfi_detach_rule(newpa->kif);
2336 pool_put(&pf_pooladdr_pl, newpa);
2337 error = EINVAL;
2338 break;
2339 }
2340 }
2341
2342 if (pca->action == PF_CHANGE_ADD_HEAD)
2343 oldpa = TAILQ_FIRST(&pool->list);
2344 else if (pca->action == PF_CHANGE_ADD_TAIL)
2345 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2346 else {
2347 int i = 0;
2348
2349 oldpa = TAILQ_FIRST(&pool->list);
2350 while ((oldpa != NULL) && (i < pca->nr)) {
2351 oldpa = TAILQ_NEXT(oldpa, entries);
2352 i++;
2353 }
2354 if (oldpa == NULL) {
2355 error = EINVAL;
2356 break;
2357 }
2358 }
2359
2360 if (pca->action == PF_CHANGE_REMOVE) {
2361 TAILQ_REMOVE(&pool->list, oldpa, entries);
2362 pfi_dynaddr_remove(&oldpa->addr);
2363 pf_tbladdr_remove(&oldpa->addr);
2364 pfi_detach_rule(oldpa->kif);
2365 pool_put(&pf_pooladdr_pl, oldpa);
2366 } else {
2367 if (oldpa == NULL)
2368 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2369 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2370 pca->action == PF_CHANGE_ADD_BEFORE)
2371 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2372 else
2373 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2374 newpa, entries);
2375 }
2376
2377 pool->cur = TAILQ_FIRST(&pool->list);
2378 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2379 pca->af);
2380 break;
2381 }
2382
2383 case DIOCGETRULESETS: {
2384 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2385 struct pf_ruleset *ruleset;
2386 struct pf_anchor *anchor;
2387
2388 pr->path[sizeof(pr->path) - 1] = 0;
2389 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2390 error = EINVAL;
2391 break;
2392 }
2393 pr->nr = 0;
2394 if (ruleset->anchor == NULL) {
2395 /* XXX kludge for pf_main_ruleset */
2396 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2397 if (anchor->parent == NULL)
2398 pr->nr++;
2399 } else {
2400 RB_FOREACH(anchor, pf_anchor_node,
2401 &ruleset->anchor->children)
2402 pr->nr++;
2403 }
2404 break;
2405 }
2406
2407 case DIOCGETRULESET: {
2408 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2409 struct pf_ruleset *ruleset;
2410 struct pf_anchor *anchor;
2411 u_int32_t nr = 0;
2412
2413 pr->path[sizeof(pr->path) - 1] = 0;
2414 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2415 error = EINVAL;
2416 break;
2417 }
2418 pr->name[0] = 0;
2419 if (ruleset->anchor == NULL) {
2420 /* XXX kludge for pf_main_ruleset */
2421 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2422 if (anchor->parent == NULL && nr++ == pr->nr) {
2423 strlcpy(pr->name, anchor->name,
2424 sizeof(pr->name));
2425 break;
2426 }
2427 } else {
2428 RB_FOREACH(anchor, pf_anchor_node,
2429 &ruleset->anchor->children)
2430 if (nr++ == pr->nr) {
2431 strlcpy(pr->name, anchor->name,
2432 sizeof(pr->name));
2433 break;
2434 }
2435 }
2436 if (!pr->name[0])
2437 error = EBUSY;
2438 break;
2439 }
2440
2441 case DIOCRCLRTABLES: {
2442 struct pfioc_table *io = (struct pfioc_table *)addr;
2443
2444 if (io->pfrio_esize != 0) {
2445 error = ENODEV;
2446 break;
2447 }
2448 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2449 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2450 break;
2451 }
2452
2453 case DIOCRADDTABLES: {
2454 struct pfioc_table *io = (struct pfioc_table *)addr;
2455
2456 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2457 error = ENODEV;
2458 break;
2459 }
2460 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2461 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2462 break;
2463 }
2464
2465 case DIOCRDELTABLES: {
2466 struct pfioc_table *io = (struct pfioc_table *)addr;
2467
2468 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2469 error = ENODEV;
2470 break;
2471 }
2472 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2473 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2474 break;
2475 }
2476
2477 case DIOCRGETTABLES: {
2478 struct pfioc_table *io = (struct pfioc_table *)addr;
2479
2480 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2481 error = ENODEV;
2482 break;
2483 }
2484 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2485 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2486 break;
2487 }
2488
2489 case DIOCRGETTSTATS: {
2490 struct pfioc_table *io = (struct pfioc_table *)addr;
2491
2492 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2493 error = ENODEV;
2494 break;
2495 }
2496 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2497 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2498 break;
2499 }
2500
2501 case DIOCRCLRTSTATS: {
2502 struct pfioc_table *io = (struct pfioc_table *)addr;
2503
2504 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2505 error = ENODEV;
2506 break;
2507 }
2508 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2509 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2510 break;
2511 }
2512
2513 case DIOCRSETTFLAGS: {
2514 struct pfioc_table *io = (struct pfioc_table *)addr;
2515
2516 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2517 error = ENODEV;
2518 break;
2519 }
2520 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2521 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2522 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2523 break;
2524 }
2525
2526 case DIOCRCLRADDRS: {
2527 struct pfioc_table *io = (struct pfioc_table *)addr;
2528
2529 if (io->pfrio_esize != 0) {
2530 error = ENODEV;
2531 break;
2532 }
2533 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2534 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2535 break;
2536 }
2537
2538 case DIOCRADDADDRS: {
2539 struct pfioc_table *io = (struct pfioc_table *)addr;
2540
2541 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2542 error = ENODEV;
2543 break;
2544 }
2545 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2546 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2547 PFR_FLAG_USERIOCTL);
2548 break;
2549 }
2550
2551 case DIOCRDELADDRS: {
2552 struct pfioc_table *io = (struct pfioc_table *)addr;
2553
2554 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2555 error = ENODEV;
2556 break;
2557 }
2558 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2559 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2560 PFR_FLAG_USERIOCTL);
2561 break;
2562 }
2563
2564 case DIOCRSETADDRS: {
2565 struct pfioc_table *io = (struct pfioc_table *)addr;
2566
2567 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2568 error = ENODEV;
2569 break;
2570 }
2571 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2572 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2573 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2574 PFR_FLAG_USERIOCTL);
2575 break;
2576 }
2577
2578 case DIOCRGETADDRS: {
2579 struct pfioc_table *io = (struct pfioc_table *)addr;
2580
2581 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2582 error = ENODEV;
2583 break;
2584 }
2585 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2586 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2587 break;
2588 }
2589
2590 case DIOCRGETASTATS: {
2591 struct pfioc_table *io = (struct pfioc_table *)addr;
2592
2593 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2594 error = ENODEV;
2595 break;
2596 }
2597 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2598 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2599 break;
2600 }
2601
2602 case DIOCRCLRASTATS: {
2603 struct pfioc_table *io = (struct pfioc_table *)addr;
2604
2605 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2606 error = ENODEV;
2607 break;
2608 }
2609 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2610 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2611 PFR_FLAG_USERIOCTL);
2612 break;
2613 }
2614
2615 case DIOCRTSTADDRS: {
2616 struct pfioc_table *io = (struct pfioc_table *)addr;
2617
2618 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2619 error = ENODEV;
2620 break;
2621 }
2622 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2623 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2624 PFR_FLAG_USERIOCTL);
2625 break;
2626 }
2627
2628 case DIOCRINADEFINE: {
2629 struct pfioc_table *io = (struct pfioc_table *)addr;
2630
2631 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2632 error = ENODEV;
2633 break;
2634 }
2635 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2636 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2637 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2638 break;
2639 }
2640
2641 case DIOCOSFPADD: {
2642 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2643 error = pf_osfp_add(io);
2644 break;
2645 }
2646
2647 case DIOCOSFPGET: {
2648 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2649 error = pf_osfp_get(io);
2650 break;
2651 }
2652
2653 case DIOCXBEGIN: {
2654 struct pfioc_trans *io = (struct pfioc_trans *)
2655 addr;
2656 static struct pfioc_trans_e ioe;
2657 static struct pfr_table table;
2658 int i;
2659
2660 if (io->esize != sizeof(ioe)) {
2661 error = ENODEV;
2662 goto fail;
2663 }
2664 for (i = 0; i < io->size; i++) {
2665 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2666 error = EFAULT;
2667 goto fail;
2668 }
2669 switch (ioe.rs_num) {
2670 #ifdef ALTQ
2671 case PF_RULESET_ALTQ:
2672 if (ioe.anchor[0]) {
2673 error = EINVAL;
2674 goto fail;
2675 }
2676 if ((error = pf_begin_altq(&ioe.ticket)))
2677 goto fail;
2678 break;
2679 #endif /* ALTQ */
2680 case PF_RULESET_TABLE:
2681 bzero(&table, sizeof(table));
2682 strlcpy(table.pfrt_anchor, ioe.anchor,
2683 sizeof(table.pfrt_anchor));
2684 if ((error = pfr_ina_begin(&table,
2685 &ioe.ticket, NULL, 0)))
2686 goto fail;
2687 break;
2688 default:
2689 if ((error = pf_begin_rules(&ioe.ticket,
2690 ioe.rs_num, ioe.anchor)))
2691 goto fail;
2692 break;
2693 }
2694 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2695 error = EFAULT;
2696 goto fail;
2697 }
2698 }
2699 break;
2700 }
2701
2702 case DIOCXROLLBACK: {
2703 struct pfioc_trans *io = (struct pfioc_trans *)
2704 addr;
2705 static struct pfioc_trans_e ioe;
2706 static struct pfr_table table;
2707 int i;
2708
2709 if (io->esize != sizeof(ioe)) {
2710 error = ENODEV;
2711 goto fail;
2712 }
2713 for (i = 0; i < io->size; i++) {
2714 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2715 error = EFAULT;
2716 goto fail;
2717 }
2718 switch (ioe.rs_num) {
2719 #ifdef ALTQ
2720 case PF_RULESET_ALTQ:
2721 if (ioe.anchor[0]) {
2722 error = EINVAL;
2723 goto fail;
2724 }
2725 if ((error = pf_rollback_altq(ioe.ticket)))
2726 goto fail; /* really bad */
2727 break;
2728 #endif /* ALTQ */
2729 case PF_RULESET_TABLE:
2730 bzero(&table, sizeof(table));
2731 strlcpy(table.pfrt_anchor, ioe.anchor,
2732 sizeof(table.pfrt_anchor));
2733 if ((error = pfr_ina_rollback(&table,
2734 ioe.ticket, NULL, 0)))
2735 goto fail; /* really bad */
2736 break;
2737 default:
2738 if ((error = pf_rollback_rules(ioe.ticket,
2739 ioe.rs_num, ioe.anchor)))
2740 goto fail; /* really bad */
2741 break;
2742 }
2743 }
2744 break;
2745 }
2746
2747 case DIOCXCOMMIT: {
2748 struct pfioc_trans *io = (struct pfioc_trans *)
2749 addr;
2750 static struct pfioc_trans_e ioe;
2751 static struct pfr_table table;
2752 struct pf_ruleset *rs;
2753 int i;
2754
2755 if (io->esize != sizeof(ioe)) {
2756 error = ENODEV;
2757 goto fail;
2758 }
2759 /* first makes sure everything will succeed */
2760 for (i = 0; i < io->size; i++) {
2761 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2762 error = EFAULT;
2763 goto fail;
2764 }
2765 switch (ioe.rs_num) {
2766 #ifdef ALTQ
2767 case PF_RULESET_ALTQ:
2768 if (ioe.anchor[0]) {
2769 error = EINVAL;
2770 goto fail;
2771 }
2772 if (!altqs_inactive_open || ioe.ticket !=
2773 ticket_altqs_inactive) {
2774 error = EBUSY;
2775 goto fail;
2776 }
2777 break;
2778 #endif /* ALTQ */
2779 case PF_RULESET_TABLE:
2780 rs = pf_find_ruleset(ioe.anchor);
2781 if (rs == NULL || !rs->topen || ioe.ticket !=
2782 rs->tticket) {
2783 error = EBUSY;
2784 goto fail;
2785 }
2786 break;
2787 default:
2788 if (ioe.rs_num < 0 || ioe.rs_num >=
2789 PF_RULESET_MAX) {
2790 error = EINVAL;
2791 goto fail;
2792 }
2793 rs = pf_find_ruleset(ioe.anchor);
2794 if (rs == NULL ||
2795 !rs->rules[ioe.rs_num].inactive.open ||
2796 rs->rules[ioe.rs_num].inactive.ticket !=
2797 ioe.ticket) {
2798 error = EBUSY;
2799 goto fail;
2800 }
2801 break;
2802 }
2803 }
2804 /* now do the commit - no errors should happen here */
2805 for (i = 0; i < io->size; i++) {
2806 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2807 error = EFAULT;
2808 goto fail;
2809 }
2810 switch (ioe.rs_num) {
2811 #ifdef ALTQ
2812 case PF_RULESET_ALTQ:
2813 if ((error = pf_commit_altq(ioe.ticket)))
2814 goto fail; /* really bad */
2815 break;
2816 #endif /* ALTQ */
2817 case PF_RULESET_TABLE:
2818 bzero(&table, sizeof(table));
2819 strlcpy(table.pfrt_anchor, ioe.anchor,
2820 sizeof(table.pfrt_anchor));
2821 if ((error = pfr_ina_commit(&table, ioe.ticket,
2822 NULL, NULL, 0)))
2823 goto fail; /* really bad */
2824 break;
2825 default:
2826 if ((error = pf_commit_rules(ioe.ticket,
2827 ioe.rs_num, ioe.anchor)))
2828 goto fail; /* really bad */
2829 break;
2830 }
2831 }
2832 break;
2833 }
2834
2835 case DIOCGETSRCNODES: {
2836 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2837 struct pf_src_node *n;
2838 struct pf_src_node *p, pstore;
2839 u_int32_t nr = 0;
2840 int space = psn->psn_len;
2841
2842 if (space == 0) {
2843 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2844 nr++;
2845 psn->psn_len = sizeof(struct pf_src_node) * nr;
2846 break;
2847 }
2848
2849 p = psn->psn_src_nodes;
2850 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2851 int secs = time_second, diff;
2852
2853 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2854 break;
2855
2856 bcopy(n, &pstore, sizeof(pstore));
2857 if (n->rule.ptr != NULL)
2858 pstore.rule.nr = n->rule.ptr->nr;
2859 pstore.creation = secs - pstore.creation;
2860 if (pstore.expire > secs)
2861 pstore.expire -= secs;
2862 else
2863 pstore.expire = 0;
2864
2865 /* adjust the connection rate estimate */
2866 diff = secs - n->conn_rate.last;
2867 if (diff >= n->conn_rate.seconds)
2868 pstore.conn_rate.count = 0;
2869 else
2870 pstore.conn_rate.count -=
2871 n->conn_rate.count * diff /
2872 n->conn_rate.seconds;
2873
2874 error = copyout(&pstore, p, sizeof(*p));
2875 if (error)
2876 goto fail;
2877 p++;
2878 nr++;
2879 }
2880 psn->psn_len = sizeof(struct pf_src_node) * nr;
2881 break;
2882 }
2883
2884 case DIOCCLRSRCNODES: {
2885 struct pf_src_node *n;
2886 struct pf_state *state;
2887
2888 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2889 state->src_node = NULL;
2890 state->nat_src_node = NULL;
2891 }
2892 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2893 n->expire = 1;
2894 n->states = 0;
2895 }
2896 pf_purge_expired_src_nodes();
2897 pf_status.src_nodes = 0;
2898 break;
2899 }
2900
2901 case DIOCSETHOSTID: {
2902 u_int32_t *hostid = (u_int32_t *)addr;
2903
2904 if (*hostid == 0)
2905 pf_status.hostid = arc4random();
2906 else
2907 pf_status.hostid = *hostid;
2908 break;
2909 }
2910
2911 case DIOCOSFPFLUSH:
2912 pf_osfp_flush();
2913 break;
2914
2915 case DIOCIGETIFACES: {
2916 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2917
2918 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2919 error = ENODEV;
2920 break;
2921 }
2922 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2923 &io->pfiio_size, io->pfiio_flags);
2924 break;
2925 }
2926
2927 case DIOCICLRISTATS: {
2928 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2929
2930 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2931 io->pfiio_flags);
2932 break;
2933 }
2934
2935 case DIOCSETIFFLAG: {
2936 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2937
2938 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2939 break;
2940 }
2941
2942 case DIOCCLRIFFLAG: {
2943 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2944
2945 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2946 break;
2947 }
2948
2949 default:
2950 error = ENODEV;
2951 break;
2952 }
2953 fail:
2954 splx(s);
2955 return (error);
2956 }
2957
2958 #ifdef __NetBSD__
2959 #ifdef INET
2960 int
2961 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2962 {
2963 int error;
2964
2965 /*
2966 * ensure that mbufs are writable beforehand
2967 * as it's assumed by pf code.
2968 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2969 * XXX inefficient
2970 */
2971 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2972 if (error) {
2973 m_freem(*mp);
2974 *mp = NULL;
2975 return error;
2976 }
2977
2978 /*
2979 * If the packet is out-bound, we can't delay checksums
2980 * here. For in-bound, the checksum has already been
2981 * validated.
2982 */
2983 if (dir == PFIL_OUT) {
2984 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2985 in_delayed_cksum(*mp);
2986 (*mp)->m_pkthdr.csum_flags &=
2987 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2988 }
2989 }
2990
2991 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2992 != PF_PASS) {
2993 m_freem(*mp);
2994 *mp = NULL;
2995 return EHOSTUNREACH;
2996 }
2997
2998 /*
2999 * we're not compatible with fast-forward.
3000 */
3001
3002 if (dir == PFIL_IN && *mp) {
3003 (*mp)->m_flags &= ~M_CANFASTFWD;
3004 }
3005
3006 return (0);
3007 }
3008 #endif /* INET */
3009
3010 #ifdef INET6
3011 int
3012 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3013 {
3014 int error;
3015
3016 /*
3017 * ensure that mbufs are writable beforehand
3018 * as it's assumed by pf code.
3019 * XXX inefficient
3020 */
3021 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3022 if (error) {
3023 m_freem(*mp);
3024 *mp = NULL;
3025 return error;
3026 }
3027
3028 /*
3029 * If the packet is out-bound, we can't delay checksums
3030 * here. For in-bound, the checksum has already been
3031 * validated.
3032 */
3033 if (dir == PFIL_OUT) {
3034 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3035 in6_delayed_cksum(*mp);
3036 (*mp)->m_pkthdr.csum_flags &=
3037 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3038 }
3039 }
3040
3041 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3042 != PF_PASS) {
3043 m_freem(*mp);
3044 *mp = NULL;
3045 return EHOSTUNREACH;
3046 } else
3047 return (0);
3048 }
3049 #endif
3050
3051 int
3052 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3053 {
3054 u_long cmd = (u_long)mp;
3055
3056 switch (cmd) {
3057 case PFIL_IFNET_ATTACH:
3058 pfi_attach_ifnet(ifp);
3059 break;
3060 case PFIL_IFNET_DETACH:
3061 pfi_detach_ifnet(ifp);
3062 break;
3063 }
3064
3065 return (0);
3066 }
3067
3068 int
3069 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3070 {
3071 extern void pfi_kifaddr_update_if(struct ifnet *);
3072
3073 u_long cmd = (u_long)mp;
3074
3075 switch (cmd) {
3076 case SIOCSIFADDR:
3077 case SIOCAIFADDR:
3078 case SIOCDIFADDR:
3079 #ifdef INET6
3080 case SIOCAIFADDR_IN6:
3081 case SIOCDIFADDR_IN6:
3082 #endif
3083 pfi_kifaddr_update_if(ifp);
3084 break;
3085 default:
3086 panic("unexpected ioctl");
3087 }
3088
3089 return (0);
3090 }
3091
/*
 * Hook pf into the pfil(9) framework: interface attach/detach events,
 * interface address-change events, and the AF_INET (plus, when
 * configured, AF_INET6) packet-filtering heads.  Idempotent -- returns
 * 0 immediately if already attached.  On partial failure every hook
 * installed so far is removed again, in reverse order, via the bad*
 * labels.  Returns 0 on success or an errno.
 */
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	if (pf_pfil_attached)
		return (0);

	/* Interface arrival/departure notifications. */
	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	/* Interface address-change notifications. */
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	/* IPv4 packet-filtering head; absence is a hard error. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	/* IPv6 packet-filtering head; absence is a hard error. */
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	/*
	 * Register interfaces that already exist; later arrivals are
	 * picked up by pfil_ifnet_wrapper().
	 */
	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

	/* Error unwind: remove hooks in reverse order of installation. */
#ifdef INET6
bad4:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}
3150
/*
 * Undo pf_pfil_attach(): detach every interface pf knows about and
 * remove all pfil(9) hooks.  Idempotent -- returns 0 immediately if
 * not attached.  Always returns 0.
 */
static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	if (pf_pfil_attached == 0)
		return (0);

	/*
	 * NOTE(review): presence is tested via pfi_index2kif[] but the
	 * detach call uses ifindex2ifnet[] at the same index --
	 * presumably the two arrays are kept in sync by pfi_attach_ifnet/
	 * pfi_detach_ifnet; confirm against pf_if.c.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	/* Remove the event hooks... */
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	/* ...and the per-AF packet-filtering hooks, when present. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
3184 #endif
3185