pf_ioctl.c revision 1.26 1 /* $NetBSD: pf_ioctl.c,v 1.26 2006/10/12 01:32:10 christos Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_pfil_hooks.h"
42 #endif
43
44 #ifdef __OpenBSD__
45 #include "pfsync.h"
46 #else
47 #define NPFSYNC 0
48 #endif
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/filio.h>
54 #include <sys/fcntl.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/kernel.h>
58 #include <sys/time.h>
59 #ifdef __OpenBSD__
60 #include <sys/timeout.h>
61 #else
62 #include <sys/callout.h>
63 #endif
64 #include <sys/pool.h>
65 #include <sys/malloc.h>
66 #ifdef __NetBSD__
67 #include <sys/conf.h>
68 #include <sys/lwp.h>
69 #include <sys/kauth.h>
70 #endif
71
72 #include <net/if.h>
73 #include <net/if_types.h>
74 #include <net/route.h>
75
76 #include <netinet/in.h>
77 #include <netinet/in_var.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/ip.h>
80 #include <netinet/ip_var.h>
81 #include <netinet/ip_icmp.h>
82
83 #ifdef __OpenBSD__
84 #include <dev/rndvar.h>
85 #endif
86 #include <net/pfvar.h>
87
88 #if NPFSYNC > 0
89 #include <net/if_pfsync.h>
90 #endif /* NPFSYNC > 0 */
91
92 #ifdef INET6
93 #include <netinet/ip6.h>
94 #include <netinet/in_pcb.h>
95 #endif /* INET6 */
96
97 #ifdef ALTQ_NEW
98 #include <altq/altq.h>
99 #endif
100
101 void pfattach(int);
102 #ifdef _LKM
103 void pfdetach(void);
104 #endif
105 int pfopen(dev_t, int, int, struct lwp *);
106 int pfclose(dev_t, int, int, struct lwp *);
107 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
108 u_int8_t, u_int8_t, u_int8_t);
109 int pf_get_ruleset_number(u_int8_t);
110 void pf_init_ruleset(struct pf_ruleset *);
111 int pf_anchor_setup(struct pf_rule *,
112 const struct pf_ruleset *, const char *);
113 int pf_anchor_copyout(const struct pf_ruleset *,
114 const struct pf_rule *, struct pfioc_rule *);
115 void pf_anchor_remove(struct pf_rule *);
116
117 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
118 void pf_empty_pool(struct pf_palist *);
119 int pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
120 #ifdef ALTQ_NEW
121 int pf_begin_altq(u_int32_t *);
122 int pf_rollback_altq(u_int32_t);
123 int pf_commit_altq(u_int32_t);
124 int pf_enable_altq(struct pf_altq *);
125 int pf_disable_altq(struct pf_altq *);
126 #endif /* ALTQ_NEW */
127 int pf_begin_rules(u_int32_t *, int, const char *);
128 int pf_rollback_rules(u_int32_t, int, char *);
129 int pf_commit_rules(u_int32_t, int, char *);
130
#ifdef __NetBSD__
/*
 * Character device switch for /dev/pf: only open, close and ioctl are
 * implemented; all other entry points are stubs.
 */
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

/* nonzero while pf's pfil(9) hooks are installed */
static int pf_pfil_attached = 0;
#endif
142
143 #ifdef __OpenBSD__
144 extern struct timeout pf_expire_to;
145 #else
146 extern struct callout pf_expire_to;
147 #endif
148
149 struct pf_rule pf_default_rule;
150 #ifdef ALTQ_NEW
151 static int pf_altq_running;
152 #endif
153
154 #define TAGID_MAX 50000
155 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
156 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
157
158 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
159 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
160 #endif
161 static u_int16_t tagname2tag(struct pf_tags *, char *);
162 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
163 static void tag_unref(struct pf_tags *, u_int16_t);
164 int pf_rtlabel_add(struct pf_addr_wrap *);
165 void pf_rtlabel_remove(struct pf_addr_wrap *);
166 void pf_rtlabel_copyout(struct pf_addr_wrap *);
167
168 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
169
170 #ifdef __NetBSD__
171 extern struct pfil_head if_pfil;
172 #endif
173
/*
 * pfattach: one-time initialization of the pf subsystem.
 * Creates the memory pools, initializes the rulesets, ALTQ queues and
 * state/source-node trees, installs the default rule and timeouts, and
 * arms the periodic purge timer.
 */
void
pfattach(int num __unused)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* memory pools for the core pf objects */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	/*
	 * NOTE(review): the state/src-node pools use the default allocator
	 * (NULL) rather than pool_allocator_nointr -- presumably because
	 * they are allocated from interrupt context; confirm.
	 */
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* cap the number of states according to the configured limit */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* arm the periodic expiry timer (OpenBSD timeout vs NetBSD callout) */
#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
247
#ifdef _LKM
/*
 * pfdetach: full teardown for LKM unload; reverses pfattach.
 * Unhooks pfil, stops the purge timer, then flushes rules, states,
 * source nodes, tables and anchors before destroying the pools and
 * sub-modules.
 */
void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	pt;
	u_int32_t		ticket;
	int			i;
	char			r = '\0';	/* empty anchor path ("") */

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_status.running = 0;

	/* clear the rulesets: an empty begin/commit pair flushes them */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ_NEW
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif

	/* clear states: mark every state for purge, then expire them */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach them from states, force expiry */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: flushing their rules reaps empty anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
325
326 int
327 pfopen(dev_t dev, int flags __unused, int fmt __unused, struct lwp *l __unused)
328 {
329 if (minor(dev) >= 1)
330 return (ENXIO);
331 return (0);
332 }
333
334 int
335 pfclose(dev_t dev, int flags __unused, int fmt __unused, struct lwp *l __unused)
336 {
337 if (minor(dev) >= 1)
338 return (ENXIO);
339 return (0);
340 }
341
342 struct pf_pool *
343 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
344 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
345 u_int8_t check_ticket)
346 {
347 struct pf_ruleset *ruleset;
348 struct pf_rule *rule;
349 int rs_num;
350
351 ruleset = pf_find_ruleset(anchor);
352 if (ruleset == NULL)
353 return (NULL);
354 rs_num = pf_get_ruleset_number(rule_action);
355 if (rs_num >= PF_RULESET_MAX)
356 return (NULL);
357 if (active) {
358 if (check_ticket && ticket !=
359 ruleset->rules[rs_num].active.ticket)
360 return (NULL);
361 if (r_last)
362 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
363 pf_rulequeue);
364 else
365 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
366 } else {
367 if (check_ticket && ticket !=
368 ruleset->rules[rs_num].inactive.ticket)
369 return (NULL);
370 if (r_last)
371 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
372 pf_rulequeue);
373 else
374 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
375 }
376 if (!r_last) {
377 while ((rule != NULL) && (rule->nr != rule_number))
378 rule = TAILQ_NEXT(rule, entries);
379 }
380 if (rule == NULL)
381 return (NULL);
382
383 return (&rule->rpool);
384 }
385
386 int
387 pf_get_ruleset_number(u_int8_t action)
388 {
389 switch (action) {
390 case PF_SCRUB:
391 case PF_NOSCRUB:
392 return (PF_RULESET_SCRUB);
393 break;
394 case PF_PASS:
395 case PF_DROP:
396 return (PF_RULESET_FILTER);
397 break;
398 case PF_NAT:
399 case PF_NONAT:
400 return (PF_RULESET_NAT);
401 break;
402 case PF_BINAT:
403 case PF_NOBINAT:
404 return (PF_RULESET_BINAT);
405 break;
406 case PF_RDR:
407 case PF_NORDR:
408 return (PF_RULESET_RDR);
409 break;
410 default:
411 return (PF_RULESET_MAX);
412 break;
413 }
414 }
415
416 void
417 pf_init_ruleset(struct pf_ruleset *ruleset)
418 {
419 int i;
420
421 memset(ruleset, 0, sizeof(struct pf_ruleset));
422 for (i = 0; i < PF_RULESET_MAX; i++) {
423 TAILQ_INIT(&ruleset->rules[i].queues[0]);
424 TAILQ_INIT(&ruleset->rules[i].queues[1]);
425 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
426 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
427 }
428 }
429
/*
 * Look up an anchor by its full path in the global anchor tree.
 * NOTE(review): the search key is static -- presumably to keep the
 * large struct off the kernel stack -- which makes this non-reentrant;
 * confirm all callers are serialized.
 */
struct pf_anchor *
pf_find_anchor(const char *path)
{
	static struct pf_anchor	 key;

	memset(&key, 0, sizeof(key));
	strlcpy(key.path, path, sizeof(key.path));
	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
}
439
440 struct pf_ruleset *
441 pf_find_ruleset(const char *path)
442 {
443 struct pf_anchor *anchor;
444
445 while (*path == '/')
446 path++;
447 if (!*path)
448 return (&pf_main_ruleset);
449 anchor = pf_find_anchor(path);
450 if (anchor == NULL)
451 return (NULL);
452 else
453 return (&anchor->ruleset);
454 }
455
/*
 * Return the ruleset for `path', creating every missing anchor along
 * the way.  Returns NULL on malformed paths, allocation failure or a
 * duplicate tree insertion.
 * NOTE(review): uses a static path buffer, so this is not re-entrant;
 * presumably all callers are serialized -- confirm.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL /* XXX gcc */, *r;
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL /* XXX gcc */,
				*dup, *parent = NULL;

	while (*path == '/')
		path++;
	/* fast path: the ruleset already exists */
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	/* walk up the path to find the deepest existing parent anchor */
	strlcpy(p, path, sizeof(p));
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now points at the first path component that must be created */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* create one anchor per remaining path component */
	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL)
			*r = 0;
		/* reject empty/oversized components and too-deep paths */
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		/* full path = parent's path + "/" + component name */
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		/* insert into the global tree and the parent's child tree */
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;	/* descend */
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;		/* last component: terminate loop */
	}
	return (&anchor->ruleset);
}
537
/*
 * Free an anchor's ruleset when it holds no rules, tables, children or
 * references, then walk up and retry for each parent, which may have
 * become empty in turn.  The main ruleset is never removed.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		/* keep rulesets that are referenced or still have contents */
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* unlink the anchor from both trees and free it */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		ruleset = &parent->ruleset;
	}
}
565
/*
 * Attach rule r to the anchor named by `name', interpreted relative to
 * ruleset s unless it starts with '/'.  "../" components climb towards
 * the root; a trailing "/*" marks a wildcard anchor call.  Creates the
 * target ruleset if needed and takes a reference on its anchor.
 * Returns 0 on success, 1 on a malformed path or creation failure.
 * NOTE(review): the static path buffer makes this non-reentrant.
 */
int
pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
    const char *name)
{
	static char		*p, path[MAXPATHLEN];
	struct pf_ruleset	*ruleset;

	r->anchor = NULL;
	r->anchor_relative = 0;
	r->anchor_wildcard = 0;
	if (!name[0])
		return (0);
	if (name[0] == '/')
		strlcpy(path, name + 1, sizeof(path));
	else {
		/* relative path: start from the calling ruleset's anchor */
		r->anchor_relative = 1;
		if (s->anchor == NULL || !s->anchor->path[0])
			path[0] = 0;
		else
			strlcpy(path, s->anchor->path, sizeof(path));
		/* consume "../" components, each strips one path level */
		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
			if (!path[0]) {
				printf("pf_anchor_setup: .. beyond root\n");
				return (1);
			}
			if ((p = strrchr(path, '/')) != NULL)
				*p = 0;
			else
				path[0] = 0;
			r->anchor_relative++;
			name += 3;
		}
		if (path[0])
			strlcat(path, "/", sizeof(path));
		strlcat(path, name, sizeof(path));
	}
	/* a trailing "/*" selects all children instead of one anchor */
	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
		r->anchor_wildcard = 1;
		*p = 0;
	}
	ruleset = pf_find_or_create_ruleset(path);
	if (ruleset == NULL || ruleset->anchor == NULL) {
		printf("pf_anchor_setup: ruleset\n");
		return (1);
	}
	r->anchor = ruleset->anchor;
	r->anchor->refcnt++;
	return (0);
}
616
/*
 * Reconstruct the anchor-call string of rule r (as the user originally
 * wrote it) into pr->anchor_call: either an absolute "/path", or a
 * relative path rebuilt from "../" components against ruleset rs, with
 * an optional trailing "*" for wildcard calls.  Returns 1 when the
 * stored relative paths are inconsistent, else 0.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		/* absolute call: "/" + target anchor path */
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int i;

		/* a = caller's path, b = target's path */
		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* strip one level from a per "../" the rule climbed */
		for (i = 1; i < r->anchor_relative; ++i) {
			if ((p = strrchr(a, '/')) == NULL)
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* after stripping, a must be a prefix of b */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
657
658 void
659 pf_anchor_remove(struct pf_rule *r)
660 {
661 if (r->anchor == NULL)
662 return;
663 if (r->anchor->refcnt <= 0) {
664 printf("pf_anchor_remove: broken refcount");
665 r->anchor = NULL;
666 return;
667 }
668 if (!--r->anchor->refcnt)
669 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
670 r->anchor = NULL;
671 }
672
673 void
674 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
675 {
676 struct pf_pooladdr *mv_pool_pa;
677
678 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
679 TAILQ_REMOVE(poola, mv_pool_pa, entries);
680 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
681 }
682 }
683
684 void
685 pf_empty_pool(struct pf_palist *poola)
686 {
687 struct pf_pooladdr *empty_pool_pa;
688
689 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
690 pfi_dynaddr_remove(&empty_pool_pa->addr);
691 pf_tbladdr_remove(&empty_pool_pa->addr);
692 pfi_detach_rule(empty_pool_pa->kif);
693 TAILQ_REMOVE(poola, empty_pool_pa, entries);
694 pool_put(&pf_pooladdr_pl, empty_pool_pa);
695 }
696 }
697
/*
 * Unlink a rule from `rulequeue' (if given) and free it once nothing
 * references it anymore.  A rule still pinned by states or source
 * nodes is only unlinked; the final free happens later when the last
 * reference drops (rulequeue == NULL path).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as unlinked (checked below) */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer the free while states/src nodes still point at the rule */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	/* release every resource the rule holds, then the rule itself */
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ_NEW
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* deferred-free path: tables were not removed above */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_detach_rule(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
743
/*
 * Return the numeric id for `tagname', allocating a new entry when the
 * name is unknown.  Ids are kept dense: the list is sorted by id and a
 * new entry takes the first unused id.  Returns 0 on exhaustion
 * (id > TAGID_MAX) or allocation failure; each successful call takes a
 * reference that must be dropped with tag_unref().
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	/* existing name: bump the refcount and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* walk ids 1,2,3,... until one is missing from the list */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
788
/*
 * Copy the name of tag id `tagid' into p (PF_TAG_NAME_SIZE bytes).
 * NOTE(review): p is left untouched when the id is not found --
 * presumably callers pre-zero the buffer; confirm.
 */
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}
800
801 static void
802 tag_unref(struct pf_tags *head, u_int16_t tag)
803 {
804 struct pf_tagname *p, *next;
805
806 if (tag == 0)
807 return;
808
809 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
810 next = TAILQ_NEXT(p, entries);
811 if (tag == p->tag) {
812 if (--p->ref == 0) {
813 TAILQ_REMOVE(head, p, entries);
814 free(p, M_TEMP);
815 }
816 break;
817 }
818 }
819 }
820
821 u_int16_t
822 pf_tagname2tag(char *tagname)
823 {
824 return (tagname2tag(&pf_tags, tagname));
825 }
826
827 void
828 pf_tag2tagname(u_int16_t tagid, char *p)
829 {
830 return (tag2tagname(&pf_tags, tagid, p));
831 }
832
833 void
834 pf_tag_ref(u_int16_t tag)
835 {
836 struct pf_tagname *t;
837
838 TAILQ_FOREACH(t, &pf_tags, entries)
839 if (t->tag == tag)
840 break;
841 if (t != NULL)
842 t->ref++;
843 }
844
845 void
846 pf_tag_unref(u_int16_t tag)
847 {
848 return (tag_unref(&pf_tags, tag));
849 }
850
/*
 * Resolve a rule address' route-label name to its id (OpenBSD only;
 * a no-op returning success elsewhere).  Returns -1 when the label
 * cannot be resolved.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif
	return (0);
}
861
/*
 * Release the route-label reference held by a rule address (OpenBSD
 * only; a no-op elsewhere).
 */
void
pf_rtlabel_remove(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}
870
/*
 * Translate a rule address' route-label id back to its name for
 * copyout to userland (OpenBSD only; a no-op elsewhere).  An id that
 * no longer resolves is rendered as "?".
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a __unused)
{
#ifdef __OpenBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}
887
888 #ifdef ALTQ_NEW
889 u_int32_t
890 pf_qname2qid(char *qname)
891 {
892 return ((u_int32_t)tagname2tag(&pf_qids, qname));
893 }
894
895 void
896 pf_qid2qname(u_int32_t qid, char *p)
897 {
898 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
899 }
900
901 void
902 pf_qid_unref(u_int32_t qid)
903 {
904 return (tag_unref(&pf_qids, (u_int16_t)qid));
905 }
906
/*
 * Start an ALTQ transaction: purge whatever an earlier, uncommitted
 * transaction left on the inactive list, hand out a fresh ticket and
 * mark the inactive list open.  Returns the last altq_remove() error,
 * if any, without opening the transaction.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* an empty qname marks an interface entry:
			 * detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
929
/*
 * Abort an ALTQ transaction: purge the inactive list and close it.
 * A stale or unknown ticket is silently accepted (returns 0), since
 * there is nothing to roll back.
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* an empty qname marks an interface entry:
			 * detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
951
/*
 * Commit an ALTQ transaction: atomically (at splsoftnet) swap the
 * inactive list in as the active one, attach/enable the new
 * disciplines, then tear down and free the previously active list.
 * Returns EBUSY on a ticket mismatch, otherwise the first error from
 * attaching or detaching disciplines.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				/* NOTE(review): returns with the swap
				 * already done and the old list not yet
				 * purged -- confirm callers cope */
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline; remember the
			 * first error but keep tearing down */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1005
1006 int
1007 pf_enable_altq(struct pf_altq *altq)
1008 {
1009 struct ifnet *ifp;
1010 struct tb_profile tb;
1011 int s, error = 0;
1012
1013 if ((ifp = ifunit(altq->ifname)) == NULL)
1014 return (EINVAL);
1015
1016 if (ifp->if_snd.altq_type != ALTQT_NONE)
1017 error = altq_enable(&ifp->if_snd);
1018
1019 /* set tokenbucket regulator */
1020 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1021 tb.rate = altq->ifbandwidth;
1022 tb.depth = altq->tbrsize;
1023 s = splimp();
1024 error = tbr_set(&ifp->if_snd, &tb);
1025 splx(s);
1026 }
1027
1028 return (error);
1029 }
1030
/*
 * Disable the ALTQ discipline on the interface named by the pf_altq
 * entry and clear its token-bucket regulator.  Returns EINVAL when the
 * interface is gone, 0 when the discipline was already superseded.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator (rate 0 removes it) */
		tb.rate = 0;
		s = splimp();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1060 #endif /* ALTQ_NEW */
1061
1062 int
1063 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1064 {
1065 struct pf_ruleset *rs;
1066 struct pf_rule *rule;
1067
1068 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1069 return (EINVAL);
1070 rs = pf_find_or_create_ruleset(anchor);
1071 if (rs == NULL)
1072 return (EINVAL);
1073 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1074 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1075 *ticket = ++rs->rules[rs_num].inactive.ticket;
1076 rs->rules[rs_num].inactive.open = 1;
1077 return (0);
1078 }
1079
1080 int
1081 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1082 {
1083 struct pf_ruleset *rs;
1084 struct pf_rule *rule;
1085
1086 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1087 return (EINVAL);
1088 rs = pf_find_ruleset(anchor);
1089 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1090 rs->rules[rs_num].inactive.ticket != ticket)
1091 return (0);
1092 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1093 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1094 rs->rules[rs_num].inactive.open = 0;
1095 return (0);
1096 }
1097
/*
 * Commit a rules transaction: atomically (at splsoftnet) swap the
 * inactive rule queue in as the active one, recompute skip steps,
 * purge the previously active rules and close the transaction.
 * Returns EBUSY on a ticket/state mismatch.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	/* committing an empty set may leave the whole ruleset empty */
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
1131
1132 int
1133 pfioctl(dev_t dev __unused, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1134 {
1135 struct pf_pooladdr *pa = NULL;
1136 struct pf_pool *pool = NULL;
1137 int s;
1138 int error = 0;
1139
1140 /* XXX keep in sync with switch() below */
1141 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1142 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1143 switch (cmd) {
1144 case DIOCGETRULES:
1145 case DIOCGETRULE:
1146 case DIOCGETADDRS:
1147 case DIOCGETADDR:
1148 case DIOCGETSTATE:
1149 case DIOCSETSTATUSIF:
1150 case DIOCGETSTATUS:
1151 case DIOCCLRSTATUS:
1152 case DIOCNATLOOK:
1153 case DIOCSETDEBUG:
1154 case DIOCGETSTATES:
1155 case DIOCGETTIMEOUT:
1156 case DIOCCLRRULECTRS:
1157 case DIOCGETLIMIT:
1158 case DIOCGETALTQS:
1159 case DIOCGETALTQ:
1160 case DIOCGETQSTATS:
1161 case DIOCGETRULESETS:
1162 case DIOCGETRULESET:
1163 case DIOCRGETTABLES:
1164 case DIOCRGETTSTATS:
1165 case DIOCRCLRTSTATS:
1166 case DIOCRCLRADDRS:
1167 case DIOCRADDADDRS:
1168 case DIOCRDELADDRS:
1169 case DIOCRSETADDRS:
1170 case DIOCRGETADDRS:
1171 case DIOCRGETASTATS:
1172 case DIOCRCLRASTATS:
1173 case DIOCRTSTADDRS:
1174 case DIOCOSFPGET:
1175 case DIOCGETSRCNODES:
1176 case DIOCCLRSRCNODES:
1177 case DIOCIGETIFACES:
1178 case DIOCICLRISTATS:
1179 case DIOCSETIFFLAG:
1180 case DIOCCLRIFFLAG:
1181 break;
1182 case DIOCRCLRTABLES:
1183 case DIOCRADDTABLES:
1184 case DIOCRDELTABLES:
1185 case DIOCRSETTFLAGS:
1186 if (((struct pfioc_table *)addr)->pfrio_flags &
1187 PFR_FLAG_DUMMY)
1188 break; /* dummy operation ok */
1189 return (EPERM);
1190 default:
1191 return (EPERM);
1192 }
1193
1194 if (!(flags & FWRITE))
1195 switch (cmd) {
1196 case DIOCGETRULES:
1197 case DIOCGETRULE:
1198 case DIOCGETADDRS:
1199 case DIOCGETADDR:
1200 case DIOCGETSTATE:
1201 case DIOCGETSTATUS:
1202 case DIOCGETSTATES:
1203 case DIOCGETTIMEOUT:
1204 case DIOCGETLIMIT:
1205 case DIOCGETALTQS:
1206 case DIOCGETALTQ:
1207 case DIOCGETQSTATS:
1208 case DIOCGETRULESETS:
1209 case DIOCGETRULESET:
1210 case DIOCRGETTABLES:
1211 case DIOCRGETTSTATS:
1212 case DIOCRGETADDRS:
1213 case DIOCRGETASTATS:
1214 case DIOCRTSTADDRS:
1215 case DIOCOSFPGET:
1216 case DIOCGETSRCNODES:
1217 case DIOCIGETIFACES:
1218 break;
1219 case DIOCRCLRTABLES:
1220 case DIOCRADDTABLES:
1221 case DIOCRDELTABLES:
1222 case DIOCRCLRTSTATS:
1223 case DIOCRCLRADDRS:
1224 case DIOCRADDADDRS:
1225 case DIOCRDELADDRS:
1226 case DIOCRSETADDRS:
1227 case DIOCRSETTFLAGS:
1228 if (((struct pfioc_table *)addr)->pfrio_flags &
1229 PFR_FLAG_DUMMY)
1230 break; /* dummy operation ok */
1231 return (EACCES);
1232 default:
1233 return (EACCES);
1234 }
1235
1236 s = splsoftnet();
1237 switch (cmd) {
1238
1239 case DIOCSTART:
1240 if (pf_status.running)
1241 error = EEXIST;
1242 else {
1243 #ifdef __NetBSD__
1244 error = pf_pfil_attach();
1245 if (error)
1246 break;
1247 #endif
1248 pf_status.running = 1;
1249 pf_status.since = time_second;
1250 if (pf_status.stateid == 0) {
1251 pf_status.stateid = time_second;
1252 pf_status.stateid = pf_status.stateid << 32;
1253 }
1254 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1255 }
1256 break;
1257
1258 case DIOCSTOP:
1259 if (!pf_status.running)
1260 error = ENOENT;
1261 else {
1262 #ifdef __NetBSD__
1263 error = pf_pfil_detach();
1264 if (error)
1265 break;
1266 #endif
1267 pf_status.running = 0;
1268 pf_status.since = time_second;
1269 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1270 }
1271 break;
1272
1273 case DIOCADDRULE: {
1274 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1275 struct pf_ruleset *ruleset;
1276 struct pf_rule *rule, *tail;
1277 struct pf_pooladdr *pa;
1278 int rs_num;
1279
1280 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1281 ruleset = pf_find_ruleset(pr->anchor);
1282 if (ruleset == NULL) {
1283 error = EINVAL;
1284 break;
1285 }
1286 rs_num = pf_get_ruleset_number(pr->rule.action);
1287 if (rs_num >= PF_RULESET_MAX) {
1288 error = EINVAL;
1289 break;
1290 }
1291 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1292 error = EINVAL;
1293 break;
1294 }
1295 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1296 error = EBUSY;
1297 break;
1298 }
1299 if (pr->pool_ticket != ticket_pabuf) {
1300 error = EBUSY;
1301 break;
1302 }
1303 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1304 if (rule == NULL) {
1305 error = ENOMEM;
1306 break;
1307 }
1308 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1309 rule->anchor = NULL;
1310 rule->kif = NULL;
1311 TAILQ_INIT(&rule->rpool.list);
1312 /* initialize refcounting */
1313 rule->states = 0;
1314 rule->src_nodes = 0;
1315 rule->entries.tqe_prev = NULL;
1316 #ifndef INET
1317 if (rule->af == AF_INET) {
1318 pool_put(&pf_rule_pl, rule);
1319 error = EAFNOSUPPORT;
1320 break;
1321 }
1322 #endif /* INET */
1323 #ifndef INET6
1324 if (rule->af == AF_INET6) {
1325 pool_put(&pf_rule_pl, rule);
1326 error = EAFNOSUPPORT;
1327 break;
1328 }
1329 #endif /* INET6 */
1330 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1331 pf_rulequeue);
1332 if (tail)
1333 rule->nr = tail->nr + 1;
1334 else
1335 rule->nr = 0;
1336 if (rule->ifname[0]) {
1337 rule->kif = pfi_attach_rule(rule->ifname);
1338 if (rule->kif == NULL) {
1339 pool_put(&pf_rule_pl, rule);
1340 error = EINVAL;
1341 break;
1342 }
1343 }
1344
1345 #ifdef ALTQ_NEW
1346 /* set queue IDs */
1347 if (rule->qname[0] != 0) {
1348 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1349 error = EBUSY;
1350 else if (rule->pqname[0] != 0) {
1351 if ((rule->pqid =
1352 pf_qname2qid(rule->pqname)) == 0)
1353 error = EBUSY;
1354 } else
1355 rule->pqid = rule->qid;
1356 }
1357 #endif
1358 if (rule->tagname[0])
1359 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1360 error = EBUSY;
1361 if (rule->match_tagname[0])
1362 if ((rule->match_tag =
1363 pf_tagname2tag(rule->match_tagname)) == 0)
1364 error = EBUSY;
1365 if (rule->rt && !rule->direction)
1366 error = EINVAL;
1367 if (pf_rtlabel_add(&rule->src.addr) ||
1368 pf_rtlabel_add(&rule->dst.addr))
1369 error = EBUSY;
1370 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1371 error = EINVAL;
1372 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1373 error = EINVAL;
1374 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1375 error = EINVAL;
1376 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1377 error = EINVAL;
1378 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1379 error = EINVAL;
1380 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1381 if (pf_tbladdr_setup(ruleset, &pa->addr))
1382 error = EINVAL;
1383
1384 if (rule->overload_tblname[0]) {
1385 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1386 rule->overload_tblname)) == NULL)
1387 error = EINVAL;
1388 else
1389 rule->overload_tbl->pfrkt_flags |=
1390 PFR_TFLAG_ACTIVE;
1391 }
1392
1393 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1394 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1395 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1396 (rule->rt > PF_FASTROUTE)) &&
1397 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1398 error = EINVAL;
1399
1400 if (error) {
1401 pf_rm_rule(NULL, rule);
1402 break;
1403 }
1404 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1405 rule->evaluations = rule->packets = rule->bytes = 0;
1406 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1407 rule, entries);
1408 break;
1409 }
1410
1411 case DIOCGETRULES: {
1412 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1413 struct pf_ruleset *ruleset;
1414 struct pf_rule *tail;
1415 int rs_num;
1416
1417 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1418 ruleset = pf_find_ruleset(pr->anchor);
1419 if (ruleset == NULL) {
1420 error = EINVAL;
1421 break;
1422 }
1423 rs_num = pf_get_ruleset_number(pr->rule.action);
1424 if (rs_num >= PF_RULESET_MAX) {
1425 error = EINVAL;
1426 break;
1427 }
1428 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1429 pf_rulequeue);
1430 if (tail)
1431 pr->nr = tail->nr + 1;
1432 else
1433 pr->nr = 0;
1434 pr->ticket = ruleset->rules[rs_num].active.ticket;
1435 break;
1436 }
1437
1438 case DIOCGETRULE: {
1439 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1440 struct pf_ruleset *ruleset;
1441 struct pf_rule *rule;
1442 int rs_num, i;
1443
1444 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1445 ruleset = pf_find_ruleset(pr->anchor);
1446 if (ruleset == NULL) {
1447 error = EINVAL;
1448 break;
1449 }
1450 rs_num = pf_get_ruleset_number(pr->rule.action);
1451 if (rs_num >= PF_RULESET_MAX) {
1452 error = EINVAL;
1453 break;
1454 }
1455 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1456 error = EBUSY;
1457 break;
1458 }
1459 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1460 while ((rule != NULL) && (rule->nr != pr->nr))
1461 rule = TAILQ_NEXT(rule, entries);
1462 if (rule == NULL) {
1463 error = EBUSY;
1464 break;
1465 }
1466 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1467 if (pf_anchor_copyout(ruleset, rule, pr)) {
1468 error = EBUSY;
1469 break;
1470 }
1471 pfi_dynaddr_copyout(&pr->rule.src.addr);
1472 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1473 pf_tbladdr_copyout(&pr->rule.src.addr);
1474 pf_tbladdr_copyout(&pr->rule.dst.addr);
1475 pf_rtlabel_copyout(&pr->rule.src.addr);
1476 pf_rtlabel_copyout(&pr->rule.dst.addr);
1477 for (i = 0; i < PF_SKIP_COUNT; ++i)
1478 if (rule->skip[i].ptr == NULL)
1479 pr->rule.skip[i].nr = -1;
1480 else
1481 pr->rule.skip[i].nr =
1482 rule->skip[i].ptr->nr;
1483 break;
1484 }
1485
1486 case DIOCCHANGERULE: {
1487 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1488 struct pf_ruleset *ruleset;
1489 struct pf_rule *oldrule = NULL, *newrule = NULL;
1490 u_int32_t nr = 0;
1491 int rs_num;
1492
1493 if (!(pcr->action == PF_CHANGE_REMOVE ||
1494 pcr->action == PF_CHANGE_GET_TICKET) &&
1495 pcr->pool_ticket != ticket_pabuf) {
1496 error = EBUSY;
1497 break;
1498 }
1499
1500 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1501 pcr->action > PF_CHANGE_GET_TICKET) {
1502 error = EINVAL;
1503 break;
1504 }
1505 ruleset = pf_find_ruleset(pcr->anchor);
1506 if (ruleset == NULL) {
1507 error = EINVAL;
1508 break;
1509 }
1510 rs_num = pf_get_ruleset_number(pcr->rule.action);
1511 if (rs_num >= PF_RULESET_MAX) {
1512 error = EINVAL;
1513 break;
1514 }
1515
1516 if (pcr->action == PF_CHANGE_GET_TICKET) {
1517 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1518 break;
1519 } else {
1520 if (pcr->ticket !=
1521 ruleset->rules[rs_num].active.ticket) {
1522 error = EINVAL;
1523 break;
1524 }
1525 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1526 error = EINVAL;
1527 break;
1528 }
1529 }
1530
1531 if (pcr->action != PF_CHANGE_REMOVE) {
1532 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1533 if (newrule == NULL) {
1534 error = ENOMEM;
1535 break;
1536 }
1537 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1538 TAILQ_INIT(&newrule->rpool.list);
1539 /* initialize refcounting */
1540 newrule->states = 0;
1541 newrule->entries.tqe_prev = NULL;
1542 #ifndef INET
1543 if (newrule->af == AF_INET) {
1544 pool_put(&pf_rule_pl, newrule);
1545 error = EAFNOSUPPORT;
1546 break;
1547 }
1548 #endif /* INET */
1549 #ifndef INET6
1550 if (newrule->af == AF_INET6) {
1551 pool_put(&pf_rule_pl, newrule);
1552 error = EAFNOSUPPORT;
1553 break;
1554 }
1555 #endif /* INET6 */
1556 if (newrule->ifname[0]) {
1557 newrule->kif = pfi_attach_rule(newrule->ifname);
1558 if (newrule->kif == NULL) {
1559 pool_put(&pf_rule_pl, newrule);
1560 error = EINVAL;
1561 break;
1562 }
1563 } else
1564 newrule->kif = NULL;
1565
1566 #ifdef ALTQ_NEW
1567 /* set queue IDs */
1568 if (newrule->qname[0] != 0) {
1569 if ((newrule->qid =
1570 pf_qname2qid(newrule->qname)) == 0)
1571 error = EBUSY;
1572 else if (newrule->pqname[0] != 0) {
1573 if ((newrule->pqid =
1574 pf_qname2qid(newrule->pqname)) == 0)
1575 error = EBUSY;
1576 } else
1577 newrule->pqid = newrule->qid;
1578 }
1579 #endif /* ALTQ_NEW */
1580 if (newrule->tagname[0])
1581 if ((newrule->tag =
1582 pf_tagname2tag(newrule->tagname)) == 0)
1583 error = EBUSY;
1584 if (newrule->match_tagname[0])
1585 if ((newrule->match_tag = pf_tagname2tag(
1586 newrule->match_tagname)) == 0)
1587 error = EBUSY;
1588 if (newrule->rt && !newrule->direction)
1589 error = EINVAL;
1590 if (pf_rtlabel_add(&newrule->src.addr) ||
1591 pf_rtlabel_add(&newrule->dst.addr))
1592 error = EBUSY;
1593 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1594 error = EINVAL;
1595 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1596 error = EINVAL;
1597 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1598 error = EINVAL;
1599 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1600 error = EINVAL;
1601 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1602 error = EINVAL;
1603
1604 if (newrule->overload_tblname[0]) {
1605 if ((newrule->overload_tbl = pfr_attach_table(
1606 ruleset, newrule->overload_tblname)) ==
1607 NULL)
1608 error = EINVAL;
1609 else
1610 newrule->overload_tbl->pfrkt_flags |=
1611 PFR_TFLAG_ACTIVE;
1612 }
1613
1614 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1615 if (((((newrule->action == PF_NAT) ||
1616 (newrule->action == PF_RDR) ||
1617 (newrule->action == PF_BINAT) ||
1618 (newrule->rt > PF_FASTROUTE)) &&
1619 !pcr->anchor[0])) &&
1620 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1621 error = EINVAL;
1622
1623 if (error) {
1624 pf_rm_rule(NULL, newrule);
1625 break;
1626 }
1627 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1628 newrule->evaluations = newrule->packets = 0;
1629 newrule->bytes = 0;
1630 }
1631 pf_empty_pool(&pf_pabuf);
1632
1633 if (pcr->action == PF_CHANGE_ADD_HEAD)
1634 oldrule = TAILQ_FIRST(
1635 ruleset->rules[rs_num].active.ptr);
1636 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1637 oldrule = TAILQ_LAST(
1638 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1639 else {
1640 oldrule = TAILQ_FIRST(
1641 ruleset->rules[rs_num].active.ptr);
1642 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1643 oldrule = TAILQ_NEXT(oldrule, entries);
1644 if (oldrule == NULL) {
1645 if (newrule != NULL)
1646 pf_rm_rule(NULL, newrule);
1647 error = EINVAL;
1648 break;
1649 }
1650 }
1651
1652 if (pcr->action == PF_CHANGE_REMOVE)
1653 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1654 else {
1655 if (oldrule == NULL)
1656 TAILQ_INSERT_TAIL(
1657 ruleset->rules[rs_num].active.ptr,
1658 newrule, entries);
1659 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1660 pcr->action == PF_CHANGE_ADD_BEFORE)
1661 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1662 else
1663 TAILQ_INSERT_AFTER(
1664 ruleset->rules[rs_num].active.ptr,
1665 oldrule, newrule, entries);
1666 }
1667
1668 nr = 0;
1669 TAILQ_FOREACH(oldrule,
1670 ruleset->rules[rs_num].active.ptr, entries)
1671 oldrule->nr = nr++;
1672
1673 ruleset->rules[rs_num].active.ticket++;
1674
1675 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1676 pf_remove_if_empty_ruleset(ruleset);
1677
1678 break;
1679 }
1680
1681 case DIOCCLRSTATES: {
1682 struct pf_state *state;
1683 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1684 int killed = 0;
1685
1686 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1687 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1688 state->u.s.kif->pfik_name)) {
1689 state->timeout = PFTM_PURGE;
1690 #if NPFSYNC
1691 /* don't send out individual delete messages */
1692 state->sync_flags = PFSTATE_NOSYNC;
1693 #endif
1694 killed++;
1695 }
1696 }
1697 pf_purge_expired_states();
1698 pf_status.states = 0;
1699 psk->psk_af = killed;
1700 #if NPFSYNC
1701 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1702 #endif
1703 break;
1704 }
1705
1706 case DIOCKILLSTATES: {
1707 struct pf_state *state;
1708 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1709 int killed = 0;
1710
1711 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1712 if ((!psk->psk_af || state->af == psk->psk_af)
1713 && (!psk->psk_proto || psk->psk_proto ==
1714 state->proto) &&
1715 PF_MATCHA(psk->psk_src.neg,
1716 &psk->psk_src.addr.v.a.addr,
1717 &psk->psk_src.addr.v.a.mask,
1718 &state->lan.addr, state->af) &&
1719 PF_MATCHA(psk->psk_dst.neg,
1720 &psk->psk_dst.addr.v.a.addr,
1721 &psk->psk_dst.addr.v.a.mask,
1722 &state->ext.addr, state->af) &&
1723 (psk->psk_src.port_op == 0 ||
1724 pf_match_port(psk->psk_src.port_op,
1725 psk->psk_src.port[0], psk->psk_src.port[1],
1726 state->lan.port)) &&
1727 (psk->psk_dst.port_op == 0 ||
1728 pf_match_port(psk->psk_dst.port_op,
1729 psk->psk_dst.port[0], psk->psk_dst.port[1],
1730 state->ext.port)) &&
1731 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1732 state->u.s.kif->pfik_name))) {
1733 state->timeout = PFTM_PURGE;
1734 killed++;
1735 }
1736 }
1737 pf_purge_expired_states();
1738 psk->psk_af = killed;
1739 break;
1740 }
1741
1742 case DIOCADDSTATE: {
1743 struct pfioc_state *ps = (struct pfioc_state *)addr;
1744 struct pf_state *state;
1745 struct pfi_kif *kif;
1746
1747 if (ps->state.timeout >= PFTM_MAX &&
1748 ps->state.timeout != PFTM_UNTIL_PACKET) {
1749 error = EINVAL;
1750 break;
1751 }
1752 state = pool_get(&pf_state_pl, PR_NOWAIT);
1753 if (state == NULL) {
1754 error = ENOMEM;
1755 break;
1756 }
1757 kif = pfi_lookup_create(ps->state.u.ifname);
1758 if (kif == NULL) {
1759 pool_put(&pf_state_pl, state);
1760 error = ENOENT;
1761 break;
1762 }
1763 bcopy(&ps->state, state, sizeof(struct pf_state));
1764 bzero(&state->u, sizeof(state->u));
1765 state->rule.ptr = &pf_default_rule;
1766 state->nat_rule.ptr = NULL;
1767 state->anchor.ptr = NULL;
1768 state->rt_kif = NULL;
1769 state->creation = time_second;
1770 state->pfsync_time = 0;
1771 state->packets[0] = state->packets[1] = 0;
1772 state->bytes[0] = state->bytes[1] = 0;
1773
1774 if (pf_insert_state(kif, state)) {
1775 pfi_maybe_destroy(kif);
1776 pool_put(&pf_state_pl, state);
1777 error = ENOMEM;
1778 }
1779 break;
1780 }
1781
1782 case DIOCGETSTATE: {
1783 struct pfioc_state *ps = (struct pfioc_state *)addr;
1784 struct pf_state *state;
1785 u_int32_t nr;
1786
1787 nr = 0;
1788 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1789 if (nr >= ps->nr)
1790 break;
1791 nr++;
1792 }
1793 if (state == NULL) {
1794 error = EBUSY;
1795 break;
1796 }
1797 bcopy(state, &ps->state, sizeof(struct pf_state));
1798 ps->state.rule.nr = state->rule.ptr->nr;
1799 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1800 -1 : state->nat_rule.ptr->nr;
1801 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1802 -1 : state->anchor.ptr->nr;
1803 ps->state.expire = pf_state_expires(state);
1804 if (ps->state.expire > time_second)
1805 ps->state.expire -= time_second;
1806 else
1807 ps->state.expire = 0;
1808 break;
1809 }
1810
1811 case DIOCGETSTATES: {
1812 struct pfioc_states *ps = (struct pfioc_states *)addr;
1813 struct pf_state *state;
1814 struct pf_state *p, pstore;
1815 struct pfi_kif *kif;
1816 u_int32_t nr = 0;
1817 int space = ps->ps_len;
1818
1819 if (space == 0) {
1820 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1821 nr += kif->pfik_states;
1822 ps->ps_len = sizeof(struct pf_state) * nr;
1823 break;
1824 }
1825
1826 p = ps->ps_states;
1827 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1828 RB_FOREACH(state, pf_state_tree_ext_gwy,
1829 &kif->pfik_ext_gwy) {
1830 int secs = time_second;
1831
1832 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1833 break;
1834
1835 bcopy(state, &pstore, sizeof(pstore));
1836 strlcpy(pstore.u.ifname, kif->pfik_name,
1837 sizeof(pstore.u.ifname));
1838 pstore.rule.nr = state->rule.ptr->nr;
1839 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1840 NULL) ? -1 : state->nat_rule.ptr->nr;
1841 pstore.anchor.nr = (state->anchor.ptr ==
1842 NULL) ? -1 : state->anchor.ptr->nr;
1843 pstore.creation = secs - pstore.creation;
1844 pstore.expire = pf_state_expires(state);
1845 if (pstore.expire > secs)
1846 pstore.expire -= secs;
1847 else
1848 pstore.expire = 0;
1849 error = copyout(&pstore, p, sizeof(*p));
1850 if (error)
1851 goto fail;
1852 p++;
1853 nr++;
1854 }
1855 ps->ps_len = sizeof(struct pf_state) * nr;
1856 break;
1857 }
1858
1859 case DIOCGETSTATUS: {
1860 struct pf_status *s = (struct pf_status *)addr;
1861 bcopy(&pf_status, s, sizeof(struct pf_status));
1862 pfi_fill_oldstatus(s);
1863 break;
1864 }
1865
1866 case DIOCSETSTATUSIF: {
1867 struct pfioc_if *pi = (struct pfioc_if *)addr;
1868
1869 if (pi->ifname[0] == 0) {
1870 bzero(pf_status.ifname, IFNAMSIZ);
1871 break;
1872 }
1873 if (ifunit(pi->ifname) == NULL) {
1874 error = EINVAL;
1875 break;
1876 }
1877 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1878 break;
1879 }
1880
1881 case DIOCCLRSTATUS: {
1882 bzero(pf_status.counters, sizeof(pf_status.counters));
1883 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1884 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1885 if (*pf_status.ifname)
1886 pfi_clr_istats(pf_status.ifname, NULL,
1887 PFI_FLAG_INSTANCE);
1888 break;
1889 }
1890
1891 case DIOCNATLOOK: {
1892 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1893 struct pf_state *state;
1894 struct pf_state key;
1895 int m = 0, direction = pnl->direction;
1896
1897 key.af = pnl->af;
1898 key.proto = pnl->proto;
1899
1900 if (!pnl->proto ||
1901 PF_AZERO(&pnl->saddr, pnl->af) ||
1902 PF_AZERO(&pnl->daddr, pnl->af) ||
1903 !pnl->dport || !pnl->sport)
1904 error = EINVAL;
1905 else {
1906 /*
1907 * userland gives us source and dest of connection,
1908 * reverse the lookup so we ask for what happens with
1909 * the return traffic, enabling us to find it in the
1910 * state tree.
1911 */
1912 if (direction == PF_IN) {
1913 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1914 key.ext.port = pnl->dport;
1915 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1916 key.gwy.port = pnl->sport;
1917 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1918 } else {
1919 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1920 key.lan.port = pnl->dport;
1921 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1922 key.ext.port = pnl->sport;
1923 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1924 }
1925 if (m > 1)
1926 error = E2BIG; /* more than one state */
1927 else if (state != NULL) {
1928 if (direction == PF_IN) {
1929 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1930 state->af);
1931 pnl->rsport = state->lan.port;
1932 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1933 pnl->af);
1934 pnl->rdport = pnl->dport;
1935 } else {
1936 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1937 state->af);
1938 pnl->rdport = state->gwy.port;
1939 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1940 pnl->af);
1941 pnl->rsport = pnl->sport;
1942 }
1943 } else
1944 error = ENOENT;
1945 }
1946 break;
1947 }
1948
1949 case DIOCSETTIMEOUT: {
1950 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1951 int old;
1952
1953 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1954 pt->seconds < 0) {
1955 error = EINVAL;
1956 goto fail;
1957 }
1958 old = pf_default_rule.timeout[pt->timeout];
1959 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1960 pt->seconds = old;
1961 break;
1962 }
1963
1964 case DIOCGETTIMEOUT: {
1965 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1966
1967 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1968 error = EINVAL;
1969 goto fail;
1970 }
1971 pt->seconds = pf_default_rule.timeout[pt->timeout];
1972 break;
1973 }
1974
1975 case DIOCGETLIMIT: {
1976 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1977
1978 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1979 error = EINVAL;
1980 goto fail;
1981 }
1982 pl->limit = pf_pool_limits[pl->index].limit;
1983 break;
1984 }
1985
1986 case DIOCSETLIMIT: {
1987 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1988 int old_limit;
1989
1990 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1991 pf_pool_limits[pl->index].pp == NULL) {
1992 error = EINVAL;
1993 goto fail;
1994 }
1995 #ifdef __OpenBSD__
1996 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1997 pl->limit, NULL, 0) != 0) {
1998 error = EBUSY;
1999 goto fail;
2000 }
2001 #else
2002 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2003 pl->limit, NULL, 0);
2004 #endif
2005 old_limit = pf_pool_limits[pl->index].limit;
2006 pf_pool_limits[pl->index].limit = pl->limit;
2007 pl->limit = old_limit;
2008 break;
2009 }
2010
2011 case DIOCSETDEBUG: {
2012 u_int32_t *level = (u_int32_t *)addr;
2013
2014 pf_status.debug = *level;
2015 break;
2016 }
2017
2018 case DIOCCLRRULECTRS: {
2019 struct pf_ruleset *ruleset = &pf_main_ruleset;
2020 struct pf_rule *rule;
2021
2022 TAILQ_FOREACH(rule,
2023 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2024 rule->evaluations = rule->packets =
2025 rule->bytes = 0;
2026 break;
2027 }
2028
2029 #ifdef ALTQ_NEW
2030 case DIOCSTARTALTQ: {
2031 struct pf_altq *altq;
2032
2033 /* enable all altq interfaces on active list */
2034 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2035 if (altq->qname[0] == 0) {
2036 error = pf_enable_altq(altq);
2037 if (error != 0)
2038 break;
2039 }
2040 }
2041 if (error == 0)
2042 pf_altq_running = 1;
2043 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2044 break;
2045 }
2046
2047 case DIOCSTOPALTQ: {
2048 struct pf_altq *altq;
2049
2050 /* disable all altq interfaces on active list */
2051 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2052 if (altq->qname[0] == 0) {
2053 error = pf_disable_altq(altq);
2054 if (error != 0)
2055 break;
2056 }
2057 }
2058 if (error == 0)
2059 pf_altq_running = 0;
2060 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2061 break;
2062 }
2063
2064 case DIOCADDALTQ: {
2065 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2066 struct pf_altq *altq, *a;
2067
2068 if (pa->ticket != ticket_altqs_inactive) {
2069 error = EBUSY;
2070 break;
2071 }
2072 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2073 if (altq == NULL) {
2074 error = ENOMEM;
2075 break;
2076 }
2077 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2078
2079 /*
2080 * if this is for a queue, find the discipline and
2081 * copy the necessary fields
2082 */
2083 if (altq->qname[0] != 0) {
2084 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2085 error = EBUSY;
2086 pool_put(&pf_altq_pl, altq);
2087 break;
2088 }
2089 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2090 if (strncmp(a->ifname, altq->ifname,
2091 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2092 altq->altq_disc = a->altq_disc;
2093 break;
2094 }
2095 }
2096 }
2097
2098 error = altq_add(altq);
2099 if (error) {
2100 pool_put(&pf_altq_pl, altq);
2101 break;
2102 }
2103
2104 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2105 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2106 break;
2107 }
2108
2109 case DIOCGETALTQS: {
2110 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2111 struct pf_altq *altq;
2112
2113 pa->nr = 0;
2114 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2115 pa->nr++;
2116 pa->ticket = ticket_altqs_active;
2117 break;
2118 }
2119
2120 case DIOCGETALTQ: {
2121 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2122 struct pf_altq *altq;
2123 u_int32_t nr;
2124
2125 if (pa->ticket != ticket_altqs_active) {
2126 error = EBUSY;
2127 break;
2128 }
2129 nr = 0;
2130 altq = TAILQ_FIRST(pf_altqs_active);
2131 while ((altq != NULL) && (nr < pa->nr)) {
2132 altq = TAILQ_NEXT(altq, entries);
2133 nr++;
2134 }
2135 if (altq == NULL) {
2136 error = EBUSY;
2137 break;
2138 }
2139 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2140 break;
2141 }
2142
2143 case DIOCCHANGEALTQ:
2144 /* CHANGEALTQ not supported yet! */
2145 error = ENODEV;
2146 break;
2147
2148 case DIOCGETQSTATS: {
2149 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2150 struct pf_altq *altq;
2151 u_int32_t nr;
2152 int nbytes;
2153
2154 if (pq->ticket != ticket_altqs_active) {
2155 error = EBUSY;
2156 break;
2157 }
2158 nbytes = pq->nbytes;
2159 nr = 0;
2160 altq = TAILQ_FIRST(pf_altqs_active);
2161 while ((altq != NULL) && (nr < pq->nr)) {
2162 altq = TAILQ_NEXT(altq, entries);
2163 nr++;
2164 }
2165 if (altq == NULL) {
2166 error = EBUSY;
2167 break;
2168 }
2169 error = altq_getqstats(altq, pq->buf, &nbytes);
2170 if (error == 0) {
2171 pq->scheduler = altq->scheduler;
2172 pq->nbytes = nbytes;
2173 }
2174 break;
2175 }
2176 #endif /* ALTQ_NEW */
2177
2178 case DIOCBEGINADDRS: {
2179 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2180
2181 pf_empty_pool(&pf_pabuf);
2182 pp->ticket = ++ticket_pabuf;
2183 break;
2184 }
2185
2186 case DIOCADDADDR: {
2187 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2188
2189 #ifndef INET
2190 if (pp->af == AF_INET) {
2191 error = EAFNOSUPPORT;
2192 break;
2193 }
2194 #endif /* INET */
2195 #ifndef INET6
2196 if (pp->af == AF_INET6) {
2197 error = EAFNOSUPPORT;
2198 break;
2199 }
2200 #endif /* INET6 */
2201 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2202 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2203 pp->addr.addr.type != PF_ADDR_TABLE) {
2204 error = EINVAL;
2205 break;
2206 }
2207 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2208 if (pa == NULL) {
2209 error = ENOMEM;
2210 break;
2211 }
2212 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2213 if (pa->ifname[0]) {
2214 pa->kif = pfi_attach_rule(pa->ifname);
2215 if (pa->kif == NULL) {
2216 pool_put(&pf_pooladdr_pl, pa);
2217 error = EINVAL;
2218 break;
2219 }
2220 }
2221 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2222 pfi_dynaddr_remove(&pa->addr);
2223 pfi_detach_rule(pa->kif);
2224 pool_put(&pf_pooladdr_pl, pa);
2225 error = EINVAL;
2226 break;
2227 }
2228 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2229 break;
2230 }
2231
2232 case DIOCGETADDRS: {
2233 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2234
2235 pp->nr = 0;
2236 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2237 pp->r_num, 0, 1, 0);
2238 if (pool == NULL) {
2239 error = EBUSY;
2240 break;
2241 }
2242 TAILQ_FOREACH(pa, &pool->list, entries)
2243 pp->nr++;
2244 break;
2245 }
2246
2247 case DIOCGETADDR: {
2248 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2249 u_int32_t nr = 0;
2250
2251 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2252 pp->r_num, 0, 1, 1);
2253 if (pool == NULL) {
2254 error = EBUSY;
2255 break;
2256 }
2257 pa = TAILQ_FIRST(&pool->list);
2258 while ((pa != NULL) && (nr < pp->nr)) {
2259 pa = TAILQ_NEXT(pa, entries);
2260 nr++;
2261 }
2262 if (pa == NULL) {
2263 error = EBUSY;
2264 break;
2265 }
2266 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2267 pfi_dynaddr_copyout(&pp->addr.addr);
2268 pf_tbladdr_copyout(&pp->addr.addr);
2269 pf_rtlabel_copyout(&pp->addr.addr);
2270 break;
2271 }
2272
2273 case DIOCCHANGEADDR: {
2274 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2275 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2276 struct pf_ruleset *ruleset;
2277
2278 if (pca->action < PF_CHANGE_ADD_HEAD ||
2279 pca->action > PF_CHANGE_REMOVE) {
2280 error = EINVAL;
2281 break;
2282 }
2283 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2284 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2285 pca->addr.addr.type != PF_ADDR_TABLE) {
2286 error = EINVAL;
2287 break;
2288 }
2289
2290 ruleset = pf_find_ruleset(pca->anchor);
2291 if (ruleset == NULL) {
2292 error = EBUSY;
2293 break;
2294 }
2295 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2296 pca->r_num, pca->r_last, 1, 1);
2297 if (pool == NULL) {
2298 error = EBUSY;
2299 break;
2300 }
2301 if (pca->action != PF_CHANGE_REMOVE) {
2302 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2303 if (newpa == NULL) {
2304 error = ENOMEM;
2305 break;
2306 }
2307 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2308 #ifndef INET
2309 if (pca->af == AF_INET) {
2310 pool_put(&pf_pooladdr_pl, newpa);
2311 error = EAFNOSUPPORT;
2312 break;
2313 }
2314 #endif /* INET */
2315 #ifndef INET6
2316 if (pca->af == AF_INET6) {
2317 pool_put(&pf_pooladdr_pl, newpa);
2318 error = EAFNOSUPPORT;
2319 break;
2320 }
2321 #endif /* INET6 */
2322 if (newpa->ifname[0]) {
2323 newpa->kif = pfi_attach_rule(newpa->ifname);
2324 if (newpa->kif == NULL) {
2325 pool_put(&pf_pooladdr_pl, newpa);
2326 error = EINVAL;
2327 break;
2328 }
2329 } else
2330 newpa->kif = NULL;
2331 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2332 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2333 pfi_dynaddr_remove(&newpa->addr);
2334 pfi_detach_rule(newpa->kif);
2335 pool_put(&pf_pooladdr_pl, newpa);
2336 error = EINVAL;
2337 break;
2338 }
2339 }
2340
2341 if (pca->action == PF_CHANGE_ADD_HEAD)
2342 oldpa = TAILQ_FIRST(&pool->list);
2343 else if (pca->action == PF_CHANGE_ADD_TAIL)
2344 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2345 else {
2346 int i = 0;
2347
2348 oldpa = TAILQ_FIRST(&pool->list);
2349 while ((oldpa != NULL) && (i < pca->nr)) {
2350 oldpa = TAILQ_NEXT(oldpa, entries);
2351 i++;
2352 }
2353 if (oldpa == NULL) {
2354 error = EINVAL;
2355 break;
2356 }
2357 }
2358
2359 if (pca->action == PF_CHANGE_REMOVE) {
2360 TAILQ_REMOVE(&pool->list, oldpa, entries);
2361 pfi_dynaddr_remove(&oldpa->addr);
2362 pf_tbladdr_remove(&oldpa->addr);
2363 pfi_detach_rule(oldpa->kif);
2364 pool_put(&pf_pooladdr_pl, oldpa);
2365 } else {
2366 if (oldpa == NULL)
2367 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2368 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2369 pca->action == PF_CHANGE_ADD_BEFORE)
2370 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2371 else
2372 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2373 newpa, entries);
2374 }
2375
2376 pool->cur = TAILQ_FIRST(&pool->list);
2377 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2378 pca->af);
2379 break;
2380 }
2381
2382 case DIOCGETRULESETS: {
2383 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2384 struct pf_ruleset *ruleset;
2385 struct pf_anchor *anchor;
2386
2387 pr->path[sizeof(pr->path) - 1] = 0;
2388 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2389 error = EINVAL;
2390 break;
2391 }
2392 pr->nr = 0;
2393 if (ruleset->anchor == NULL) {
2394 /* XXX kludge for pf_main_ruleset */
2395 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2396 if (anchor->parent == NULL)
2397 pr->nr++;
2398 } else {
2399 RB_FOREACH(anchor, pf_anchor_node,
2400 &ruleset->anchor->children)
2401 pr->nr++;
2402 }
2403 break;
2404 }
2405
2406 case DIOCGETRULESET: {
2407 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2408 struct pf_ruleset *ruleset;
2409 struct pf_anchor *anchor;
2410 u_int32_t nr = 0;
2411
2412 pr->path[sizeof(pr->path) - 1] = 0;
2413 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2414 error = EINVAL;
2415 break;
2416 }
2417 pr->name[0] = 0;
2418 if (ruleset->anchor == NULL) {
2419 /* XXX kludge for pf_main_ruleset */
2420 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2421 if (anchor->parent == NULL && nr++ == pr->nr) {
2422 strlcpy(pr->name, anchor->name,
2423 sizeof(pr->name));
2424 break;
2425 }
2426 } else {
2427 RB_FOREACH(anchor, pf_anchor_node,
2428 &ruleset->anchor->children)
2429 if (nr++ == pr->nr) {
2430 strlcpy(pr->name, anchor->name,
2431 sizeof(pr->name));
2432 break;
2433 }
2434 }
2435 if (!pr->name[0])
2436 error = EBUSY;
2437 break;
2438 }
2439
2440 case DIOCRCLRTABLES: {
2441 struct pfioc_table *io = (struct pfioc_table *)addr;
2442
2443 if (io->pfrio_esize != 0) {
2444 error = ENODEV;
2445 break;
2446 }
2447 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2448 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2449 break;
2450 }
2451
2452 case DIOCRADDTABLES: {
2453 struct pfioc_table *io = (struct pfioc_table *)addr;
2454
2455 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2456 error = ENODEV;
2457 break;
2458 }
2459 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2460 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2461 break;
2462 }
2463
2464 case DIOCRDELTABLES: {
2465 struct pfioc_table *io = (struct pfioc_table *)addr;
2466
2467 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2468 error = ENODEV;
2469 break;
2470 }
2471 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2472 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2473 break;
2474 }
2475
2476 case DIOCRGETTABLES: {
2477 struct pfioc_table *io = (struct pfioc_table *)addr;
2478
2479 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2480 error = ENODEV;
2481 break;
2482 }
2483 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2484 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2485 break;
2486 }
2487
2488 case DIOCRGETTSTATS: {
2489 struct pfioc_table *io = (struct pfioc_table *)addr;
2490
2491 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2492 error = ENODEV;
2493 break;
2494 }
2495 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2496 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2497 break;
2498 }
2499
2500 case DIOCRCLRTSTATS: {
2501 struct pfioc_table *io = (struct pfioc_table *)addr;
2502
2503 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2504 error = ENODEV;
2505 break;
2506 }
2507 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2508 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2509 break;
2510 }
2511
2512 case DIOCRSETTFLAGS: {
2513 struct pfioc_table *io = (struct pfioc_table *)addr;
2514
2515 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2516 error = ENODEV;
2517 break;
2518 }
2519 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2520 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2521 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2522 break;
2523 }
2524
2525 case DIOCRCLRADDRS: {
2526 struct pfioc_table *io = (struct pfioc_table *)addr;
2527
2528 if (io->pfrio_esize != 0) {
2529 error = ENODEV;
2530 break;
2531 }
2532 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2533 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2534 break;
2535 }
2536
2537 case DIOCRADDADDRS: {
2538 struct pfioc_table *io = (struct pfioc_table *)addr;
2539
2540 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2541 error = ENODEV;
2542 break;
2543 }
2544 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2545 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2546 PFR_FLAG_USERIOCTL);
2547 break;
2548 }
2549
2550 case DIOCRDELADDRS: {
2551 struct pfioc_table *io = (struct pfioc_table *)addr;
2552
2553 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2554 error = ENODEV;
2555 break;
2556 }
2557 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2558 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2559 PFR_FLAG_USERIOCTL);
2560 break;
2561 }
2562
2563 case DIOCRSETADDRS: {
2564 struct pfioc_table *io = (struct pfioc_table *)addr;
2565
2566 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2567 error = ENODEV;
2568 break;
2569 }
2570 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2571 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2572 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2573 PFR_FLAG_USERIOCTL);
2574 break;
2575 }
2576
2577 case DIOCRGETADDRS: {
2578 struct pfioc_table *io = (struct pfioc_table *)addr;
2579
2580 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2581 error = ENODEV;
2582 break;
2583 }
2584 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2585 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2586 break;
2587 }
2588
2589 case DIOCRGETASTATS: {
2590 struct pfioc_table *io = (struct pfioc_table *)addr;
2591
2592 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2593 error = ENODEV;
2594 break;
2595 }
2596 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2597 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2598 break;
2599 }
2600
2601 case DIOCRCLRASTATS: {
2602 struct pfioc_table *io = (struct pfioc_table *)addr;
2603
2604 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2605 error = ENODEV;
2606 break;
2607 }
2608 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2609 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2610 PFR_FLAG_USERIOCTL);
2611 break;
2612 }
2613
2614 case DIOCRTSTADDRS: {
2615 struct pfioc_table *io = (struct pfioc_table *)addr;
2616
2617 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2618 error = ENODEV;
2619 break;
2620 }
2621 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2622 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2623 PFR_FLAG_USERIOCTL);
2624 break;
2625 }
2626
2627 case DIOCRINADEFINE: {
2628 struct pfioc_table *io = (struct pfioc_table *)addr;
2629
2630 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2631 error = ENODEV;
2632 break;
2633 }
2634 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2635 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2636 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2637 break;
2638 }
2639
2640 case DIOCOSFPADD: {
2641 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2642 error = pf_osfp_add(io);
2643 break;
2644 }
2645
2646 case DIOCOSFPGET: {
2647 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2648 error = pf_osfp_get(io);
2649 break;
2650 }
2651
2652 case DIOCXBEGIN: {
2653 struct pfioc_trans *io = (struct pfioc_trans *)
2654 addr;
2655 static struct pfioc_trans_e ioe;
2656 static struct pfr_table table;
2657 int i;
2658
2659 if (io->esize != sizeof(ioe)) {
2660 error = ENODEV;
2661 goto fail;
2662 }
2663 for (i = 0; i < io->size; i++) {
2664 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2665 error = EFAULT;
2666 goto fail;
2667 }
2668 switch (ioe.rs_num) {
2669 #ifdef ALTQ_NEW
2670 case PF_RULESET_ALTQ:
2671 if (ioe.anchor[0]) {
2672 error = EINVAL;
2673 goto fail;
2674 }
2675 if ((error = pf_begin_altq(&ioe.ticket)))
2676 goto fail;
2677 break;
2678 #endif /* ALTQ_NEW */
2679 case PF_RULESET_TABLE:
2680 bzero(&table, sizeof(table));
2681 strlcpy(table.pfrt_anchor, ioe.anchor,
2682 sizeof(table.pfrt_anchor));
2683 if ((error = pfr_ina_begin(&table,
2684 &ioe.ticket, NULL, 0)))
2685 goto fail;
2686 break;
2687 default:
2688 if ((error = pf_begin_rules(&ioe.ticket,
2689 ioe.rs_num, ioe.anchor)))
2690 goto fail;
2691 break;
2692 }
2693 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2694 error = EFAULT;
2695 goto fail;
2696 }
2697 }
2698 break;
2699 }
2700
2701 case DIOCXROLLBACK: {
2702 struct pfioc_trans *io = (struct pfioc_trans *)
2703 addr;
2704 static struct pfioc_trans_e ioe;
2705 static struct pfr_table table;
2706 int i;
2707
2708 if (io->esize != sizeof(ioe)) {
2709 error = ENODEV;
2710 goto fail;
2711 }
2712 for (i = 0; i < io->size; i++) {
2713 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2714 error = EFAULT;
2715 goto fail;
2716 }
2717 switch (ioe.rs_num) {
2718 #ifdef ALTQ_NEW
2719 case PF_RULESET_ALTQ:
2720 if (ioe.anchor[0]) {
2721 error = EINVAL;
2722 goto fail;
2723 }
2724 if ((error = pf_rollback_altq(ioe.ticket)))
2725 goto fail; /* really bad */
2726 break;
2727 #endif /* ALTQ_NEW */
2728 case PF_RULESET_TABLE:
2729 bzero(&table, sizeof(table));
2730 strlcpy(table.pfrt_anchor, ioe.anchor,
2731 sizeof(table.pfrt_anchor));
2732 if ((error = pfr_ina_rollback(&table,
2733 ioe.ticket, NULL, 0)))
2734 goto fail; /* really bad */
2735 break;
2736 default:
2737 if ((error = pf_rollback_rules(ioe.ticket,
2738 ioe.rs_num, ioe.anchor)))
2739 goto fail; /* really bad */
2740 break;
2741 }
2742 }
2743 break;
2744 }
2745
2746 case DIOCXCOMMIT: {
2747 struct pfioc_trans *io = (struct pfioc_trans *)
2748 addr;
2749 static struct pfioc_trans_e ioe;
2750 static struct pfr_table table;
2751 struct pf_ruleset *rs;
2752 int i;
2753
2754 if (io->esize != sizeof(ioe)) {
2755 error = ENODEV;
2756 goto fail;
2757 }
2758 /* first makes sure everything will succeed */
2759 for (i = 0; i < io->size; i++) {
2760 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2761 error = EFAULT;
2762 goto fail;
2763 }
2764 switch (ioe.rs_num) {
2765 #ifdef ALTQ_NEW
2766 case PF_RULESET_ALTQ:
2767 if (ioe.anchor[0]) {
2768 error = EINVAL;
2769 goto fail;
2770 }
2771 if (!altqs_inactive_open || ioe.ticket !=
2772 ticket_altqs_inactive) {
2773 error = EBUSY;
2774 goto fail;
2775 }
2776 break;
2777 #endif /* ALTQ_NEW */
2778 case PF_RULESET_TABLE:
2779 rs = pf_find_ruleset(ioe.anchor);
2780 if (rs == NULL || !rs->topen || ioe.ticket !=
2781 rs->tticket) {
2782 error = EBUSY;
2783 goto fail;
2784 }
2785 break;
2786 default:
2787 if (ioe.rs_num < 0 || ioe.rs_num >=
2788 PF_RULESET_MAX) {
2789 error = EINVAL;
2790 goto fail;
2791 }
2792 rs = pf_find_ruleset(ioe.anchor);
2793 if (rs == NULL ||
2794 !rs->rules[ioe.rs_num].inactive.open ||
2795 rs->rules[ioe.rs_num].inactive.ticket !=
2796 ioe.ticket) {
2797 error = EBUSY;
2798 goto fail;
2799 }
2800 break;
2801 }
2802 }
2803 /* now do the commit - no errors should happen here */
2804 for (i = 0; i < io->size; i++) {
2805 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2806 error = EFAULT;
2807 goto fail;
2808 }
2809 switch (ioe.rs_num) {
2810 #ifdef ALTQ_NEW
2811 case PF_RULESET_ALTQ:
2812 if ((error = pf_commit_altq(ioe.ticket)))
2813 goto fail; /* really bad */
2814 break;
2815 #endif /* ALTQ_NEW */
2816 case PF_RULESET_TABLE:
2817 bzero(&table, sizeof(table));
2818 strlcpy(table.pfrt_anchor, ioe.anchor,
2819 sizeof(table.pfrt_anchor));
2820 if ((error = pfr_ina_commit(&table, ioe.ticket,
2821 NULL, NULL, 0)))
2822 goto fail; /* really bad */
2823 break;
2824 default:
2825 if ((error = pf_commit_rules(ioe.ticket,
2826 ioe.rs_num, ioe.anchor)))
2827 goto fail; /* really bad */
2828 break;
2829 }
2830 }
2831 break;
2832 }
2833
2834 case DIOCGETSRCNODES: {
2835 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2836 struct pf_src_node *n;
2837 struct pf_src_node *p, pstore;
2838 u_int32_t nr = 0;
2839 int space = psn->psn_len;
2840
2841 if (space == 0) {
2842 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2843 nr++;
2844 psn->psn_len = sizeof(struct pf_src_node) * nr;
2845 break;
2846 }
2847
2848 p = psn->psn_src_nodes;
2849 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2850 int secs = time_second, diff;
2851
2852 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2853 break;
2854
2855 bcopy(n, &pstore, sizeof(pstore));
2856 if (n->rule.ptr != NULL)
2857 pstore.rule.nr = n->rule.ptr->nr;
2858 pstore.creation = secs - pstore.creation;
2859 if (pstore.expire > secs)
2860 pstore.expire -= secs;
2861 else
2862 pstore.expire = 0;
2863
2864 /* adjust the connection rate estimate */
2865 diff = secs - n->conn_rate.last;
2866 if (diff >= n->conn_rate.seconds)
2867 pstore.conn_rate.count = 0;
2868 else
2869 pstore.conn_rate.count -=
2870 n->conn_rate.count * diff /
2871 n->conn_rate.seconds;
2872
2873 error = copyout(&pstore, p, sizeof(*p));
2874 if (error)
2875 goto fail;
2876 p++;
2877 nr++;
2878 }
2879 psn->psn_len = sizeof(struct pf_src_node) * nr;
2880 break;
2881 }
2882
2883 case DIOCCLRSRCNODES: {
2884 struct pf_src_node *n;
2885 struct pf_state *state;
2886
2887 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2888 state->src_node = NULL;
2889 state->nat_src_node = NULL;
2890 }
2891 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2892 n->expire = 1;
2893 n->states = 0;
2894 }
2895 pf_purge_expired_src_nodes();
2896 pf_status.src_nodes = 0;
2897 break;
2898 }
2899
2900 case DIOCSETHOSTID: {
2901 u_int32_t *hostid = (u_int32_t *)addr;
2902
2903 if (*hostid == 0)
2904 pf_status.hostid = arc4random();
2905 else
2906 pf_status.hostid = *hostid;
2907 break;
2908 }
2909
2910 case DIOCOSFPFLUSH:
2911 pf_osfp_flush();
2912 break;
2913
2914 case DIOCIGETIFACES: {
2915 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2916
2917 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2918 error = ENODEV;
2919 break;
2920 }
2921 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2922 &io->pfiio_size, io->pfiio_flags);
2923 break;
2924 }
2925
2926 case DIOCICLRISTATS: {
2927 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2928
2929 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2930 io->pfiio_flags);
2931 break;
2932 }
2933
2934 case DIOCSETIFFLAG: {
2935 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2936
2937 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2938 break;
2939 }
2940
2941 case DIOCCLRIFFLAG: {
2942 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2943
2944 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2945 break;
2946 }
2947
2948 default:
2949 error = ENODEV;
2950 break;
2951 }
2952 fail:
2953 splx(s);
2954 return (error);
2955 }
2956
2957 #ifdef __NetBSD__
2958 #ifdef INET
2959 int
2960 pfil4_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp, int dir)
2961 {
2962 int error;
2963
2964 /*
2965 * ensure that mbufs are writable beforehand
2966 * as it's assumed by pf code.
2967 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2968 * XXX inefficient
2969 */
2970 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2971 if (error) {
2972 m_freem(*mp);
2973 *mp = NULL;
2974 return error;
2975 }
2976
2977 /*
2978 * If the packet is out-bound, we can't delay checksums
2979 * here. For in-bound, the checksum has already been
2980 * validated.
2981 */
2982 if (dir == PFIL_OUT) {
2983 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2984 in_delayed_cksum(*mp);
2985 (*mp)->m_pkthdr.csum_flags &=
2986 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2987 }
2988 }
2989
2990 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2991 != PF_PASS) {
2992 m_freem(*mp);
2993 *mp = NULL;
2994 return EHOSTUNREACH;
2995 }
2996
2997 /*
2998 * we're not compatible with fast-forward.
2999 */
3000
3001 if (dir == PFIL_IN && *mp) {
3002 (*mp)->m_flags &= ~M_CANFASTFWD;
3003 }
3004
3005 return (0);
3006 }
3007 #endif /* INET */
3008
3009 #ifdef INET6
3010 int
3011 pfil6_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp, int dir)
3012 {
3013 int error;
3014
3015 /*
3016 * ensure that mbufs are writable beforehand
3017 * as it's assumed by pf code.
3018 * XXX inefficient
3019 */
3020 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3021 if (error) {
3022 m_freem(*mp);
3023 *mp = NULL;
3024 return error;
3025 }
3026
3027 /*
3028 * If the packet is out-bound, we can't delay checksums
3029 * here. For in-bound, the checksum has already been
3030 * validated.
3031 */
3032 if (dir == PFIL_OUT) {
3033 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3034 in6_delayed_cksum(*mp);
3035 (*mp)->m_pkthdr.csum_flags &=
3036 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3037 }
3038 }
3039
3040 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3041 != PF_PASS) {
3042 m_freem(*mp);
3043 *mp = NULL;
3044 return EHOSTUNREACH;
3045 } else
3046 return (0);
3047 }
3048 #endif
3049
3050 int
3051 pfil_ifnet_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp,
3052 int dir __unused)
3053 {
3054 u_long cmd = (u_long)mp;
3055
3056 switch (cmd) {
3057 case PFIL_IFNET_ATTACH:
3058 pfi_attach_ifnet(ifp);
3059 break;
3060 case PFIL_IFNET_DETACH:
3061 pfi_detach_ifnet(ifp);
3062 break;
3063 }
3064
3065 return (0);
3066 }
3067
/*
 * PFIL_IFADDR hook: called when an address ioctl is performed on an
 * interface, so pf can refresh its cached copy of that interface's
 * addresses (used by dynamic address rules such as "(if)").
 */
int
pfil_ifaddr_wrapper(void *arg __unused, struct mbuf **mp, struct ifnet *ifp,
    int dir __unused)
{
	extern void pfi_kifaddr_update_if(struct ifnet *);

	/*
	 * For PFIL_IFADDR hooks, pfil smuggles the ioctl command through
	 * the mbuf-pointer argument; there is no packet here.
	 */
	u_long cmd = (u_long)mp;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif
		/* An address was set/added/deleted on ifp: resync pf. */
		pfi_kifaddr_update_if(ifp);
		break;
	default:
		/* pfil should only deliver the commands listed above. */
		panic("unexpected ioctl");
	}

	return (0);
}
3092
/*
 * Hook pf into the pfil(9) framework: interface attach/detach events,
 * interface address-change events, and the IPv4 (and, if configured,
 * IPv6) packet paths.  Also registers every interface that already
 * exists.  Idempotent: a no-op if already attached.
 *
 * Returns 0 on success or an errno; on failure every hook installed
 * so far is backed out via the goto ladder below (the labels unwind
 * in reverse order of installation).
 */
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	if (pf_pfil_attached)
		return (0);

	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	/* IPv4 packet path; missing head means no INET support. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	/* IPv6 packet path. */
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	/*
	 * Register interfaces that came up before pf was enabled;
	 * later arrivals are handled by pfil_ifnet_wrapper.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad4:
	/* Undo the IPv4 hook installed just above. */
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}
3151
/*
 * Undo pf_pfil_attach(): deregister all known interfaces from pf and
 * remove every pfil(9) hook.  Idempotent: a no-op if not attached.
 * Always returns 0.
 */
static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	if (pf_pfil_attached == 0)
		return (0);

	/*
	 * Detach every interface pf currently tracks.  NOTE(review):
	 * this tests pfi_index2kif[i] but passes ifindex2ifnet[i] —
	 * presumably the two arrays are kept index-parallel by
	 * pfi_attach_ifnet(); confirm against pf_if.c.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	/* Packet-path hooks; heads may be absent if the AF is not configured. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
3185 #endif
3186