/*	$NetBSD: pf_ioctl.c,v 1.32 2007/12/11 11:08:20 lukem Exp $	*/
2 /* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.32 2007/12/11 11:08:20 lukem Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_inet.h"
44 #include "opt_pfil_hooks.h"
45 #endif
46
47 #ifdef __OpenBSD__
48 #include "pfsync.h"
49 #else
50 #define NPFSYNC 0
51 #endif
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mbuf.h>
56 #include <sys/filio.h>
57 #include <sys/fcntl.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/time.h>
62 #ifdef __OpenBSD__
63 #include <sys/timeout.h>
64 #else
65 #include <sys/callout.h>
66 #endif
67 #include <sys/pool.h>
68 #include <sys/malloc.h>
69 #ifdef __NetBSD__
70 #include <sys/conf.h>
71 #include <sys/lwp.h>
72 #include <sys/kauth.h>
73 #endif
74
75 #include <net/if.h>
76 #include <net/if_types.h>
77 #include <net/route.h>
78
79 #include <netinet/in.h>
80 #include <netinet/in_var.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet/ip_icmp.h>
85
86 #ifdef __OpenBSD__
87 #include <dev/rndvar.h>
88 #endif
89 #include <net/pfvar.h>
90
91 #if NPFSYNC > 0
92 #include <net/if_pfsync.h>
93 #endif /* NPFSYNC > 0 */
94
95 #ifdef INET6
96 #include <netinet/ip6.h>
97 #include <netinet/in_pcb.h>
98 #endif /* INET6 */
99
100 #ifdef ALTQ
101 #include <altq/altq.h>
102 #endif
103
/* Forward declarations: device entry points and internal helpers. */
void			 pfattach(int);
#ifdef _LKM
void			 pfdetach(void);
#endif
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);
int			 pf_get_ruleset_number(u_int8_t);
void			 pf_init_ruleset(struct pf_ruleset *);
int			 pf_anchor_setup(struct pf_rule *,
			    const struct pf_ruleset *, const char *);
int			 pf_anchor_copyout(const struct pf_ruleset *,
			    const struct pf_rule *, struct pfioc_rule *);
void			 pf_anchor_remove(struct pf_rule *);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_commit_rules(u_int32_t, int, char *);

#ifdef __NetBSD__
/* Character-device switch for /dev/pf; only open/close/ioctl are used. */
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

/* Nonzero once the pfil hooks have been installed. */
static int pf_pfil_attached = 0;
#endif

/* Periodic state/src-node expiry timer, defined in pf.c. */
#ifdef __OpenBSD__
extern struct timeout	 pf_expire_to;
#else
extern struct callout	 pf_expire_to;
#endif

/* Catch-all rule applied when no configured rule matches. */
struct pf_rule		 pf_default_rule;
#ifdef ALTQ
static int		 pf_altq_running;	/* ALTQ globally enabled? */
#endif

#define	TAGID_MAX	 50000
/*
 * Name<->id registries: pf_tags for packet tags, pf_qids for ALTQ queue
 * names.  Both are kept sorted by id so free slots can be reused.
 */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

/* Debug printf gated on the configured pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
extern struct pfil_head	if_pfil;
#endif
176
/*
 * Attach the pf pseudo-device: create memory pools, initialize the
 * sub-systems (tables, interfaces, OS fingerprinting, normalization),
 * set up the default rule and timeouts, and arm the purge timer.
 * Called once at boot / module load; "num" is unused.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* Pools backing rules, source nodes, states, altqs and pool addrs. */
#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif

	pfr_initialize();	/* tables */
	pfi_initialize();	/* interface abstraction */
	pf_osfp_initialize();	/* passive OS fingerprinting */

	/* Cap the number of states at the configured limit. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;

	/* Arm the periodic purge of expired states / source nodes. */
#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to, 0);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
264
#ifdef _LKM
/*
 * Detach pf (module unload): unhook pfil, stop the purge timer, then
 * tear everything down in dependency order — rules, altqs, states,
 * source nodes, tables, anchors, the main ruleset, the pools and
 * finally the sub-systems.  Mirrors pfattach() in reverse.
 */
void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_status.running = 0;

	/* clear the rulesets: commit an empty inactive ruleset over each */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
				pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif

	/* clear states: mark each for immediate purge, then sweep */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states();
#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: drop state back-refs, then expire them all */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors by committing empty rulesets into each */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
342
343 int
344 pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
345 {
346 if (minor(dev) >= 1)
347 return (ENXIO);
348 return (0);
349 }
350
351 int
352 pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
353 {
354 if (minor(dev) >= 1)
355 return (ENXIO);
356 return (0);
357 }
358
359 struct pf_pool *
360 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
361 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
362 u_int8_t check_ticket)
363 {
364 struct pf_ruleset *ruleset;
365 struct pf_rule *rule;
366 int rs_num;
367
368 ruleset = pf_find_ruleset(anchor);
369 if (ruleset == NULL)
370 return (NULL);
371 rs_num = pf_get_ruleset_number(rule_action);
372 if (rs_num >= PF_RULESET_MAX)
373 return (NULL);
374 if (active) {
375 if (check_ticket && ticket !=
376 ruleset->rules[rs_num].active.ticket)
377 return (NULL);
378 if (r_last)
379 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
380 pf_rulequeue);
381 else
382 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
383 } else {
384 if (check_ticket && ticket !=
385 ruleset->rules[rs_num].inactive.ticket)
386 return (NULL);
387 if (r_last)
388 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
389 pf_rulequeue);
390 else
391 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
392 }
393 if (!r_last) {
394 while ((rule != NULL) && (rule->nr != rule_number))
395 rule = TAILQ_NEXT(rule, entries);
396 }
397 if (rule == NULL)
398 return (NULL);
399
400 return (&rule->rpool);
401 }
402
403 int
404 pf_get_ruleset_number(u_int8_t action)
405 {
406 switch (action) {
407 case PF_SCRUB:
408 case PF_NOSCRUB:
409 return (PF_RULESET_SCRUB);
410 break;
411 case PF_PASS:
412 case PF_DROP:
413 return (PF_RULESET_FILTER);
414 break;
415 case PF_NAT:
416 case PF_NONAT:
417 return (PF_RULESET_NAT);
418 break;
419 case PF_BINAT:
420 case PF_NOBINAT:
421 return (PF_RULESET_BINAT);
422 break;
423 case PF_RDR:
424 case PF_NORDR:
425 return (PF_RULESET_RDR);
426 break;
427 default:
428 return (PF_RULESET_MAX);
429 break;
430 }
431 }
432
433 void
434 pf_init_ruleset(struct pf_ruleset *ruleset)
435 {
436 int i;
437
438 memset(ruleset, 0, sizeof(struct pf_ruleset));
439 for (i = 0; i < PF_RULESET_MAX; i++) {
440 TAILQ_INIT(&ruleset->rules[i].queues[0]);
441 TAILQ_INIT(&ruleset->rules[i].queues[1]);
442 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
443 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
444 }
445 }
446
/*
 * Find an anchor by its full path in the global anchor tree.
 * Returns NULL if no anchor with that path exists.
 *
 * NOTE(review): `key` is static, presumably to keep the large
 * struct pf_anchor off the kernel stack; this makes the function
 * non-reentrant — confirm callers serialize access.
 */
struct pf_anchor *
pf_find_anchor(const char *path)
{
	static struct pf_anchor	 key;

	memset(&key, 0, sizeof(key));
	strlcpy(key.path, path, sizeof(key.path));
	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
}
456
457 struct pf_ruleset *
458 pf_find_ruleset(const char *path)
459 {
460 struct pf_anchor *anchor;
461
462 while (*path == '/')
463 path++;
464 if (!*path)
465 return (&pf_main_ruleset);
466 anchor = pf_find_anchor(path);
467 if (anchor == NULL)
468 return (NULL);
469 else
470 return (&anchor->ruleset);
471 }
472
/*
 * Resolve a ruleset by anchor path, creating any missing anchors along
 * the way.  First walks backwards from the full path to find the
 * deepest existing ancestor, then creates one anchor per remaining
 * path component, linking each into the global tree and its parent's
 * children tree.  Returns NULL on malformed paths or allocation
 * failure.
 *
 * NOTE(review): `p` is a static MAXPATHLEN buffer (kept off the kernel
 * stack), so this function is not re-entrant.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(const char *path)
{
	static char		 p[MAXPATHLEN];
	char			*q = NULL /* XXX gcc */, *r;
	struct pf_ruleset	*ruleset;
	struct pf_anchor	*anchor = NULL /* XXX gcc */,
				*dup, *parent = NULL;

	while (*path == '/')
		path++;
	/* Fast path: the ruleset already exists. */
	ruleset = pf_find_ruleset(path);
	if (ruleset != NULL)
		return (ruleset);
	strlcpy(p, path, sizeof(p));
	/* Strip trailing components until an existing ancestor is found. */
	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
		*q = 0;
		if ((ruleset = pf_find_ruleset(p)) != NULL) {
			parent = ruleset->anchor;
			break;
		}
	}
	/* q now points at the first component that must be created. */
	if (q == NULL)
		q = p;
	else
		q++;
	strlcpy(p, path, sizeof(p));
	if (!*q)
		return (NULL);
	/* Create one anchor per remaining path component. */
	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL)
			*r = 0;
		/* Reject empty/oversized components and too-deep paths. */
		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
		    (parent != NULL && strlen(parent->path) >=
		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
			return (NULL);
		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
		    M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(*anchor));
		RB_INIT(&anchor->children);
		strlcpy(anchor->name, q, sizeof(anchor->name));
		if (parent != NULL) {
			strlcpy(anchor->path, parent->path,
			    sizeof(anchor->path));
			strlcat(anchor->path, "/", sizeof(anchor->path));
		}
		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
		/* Insert into the global tree; a duplicate is a bug. */
		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
		    NULL) {
			printf("pf_find_or_create_ruleset: RB_INSERT1 "
			    "'%s' '%s' collides with '%s' '%s'\n",
			    anchor->path, anchor->name, dup->path, dup->name);
			free(anchor, M_TEMP);
			return (NULL);
		}
		if (parent != NULL) {
			anchor->parent = parent;
			/* Also link under the parent; back out on collision. */
			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
			    anchor)) != NULL) {
				printf("pf_find_or_create_ruleset: "
				    "RB_INSERT2 '%s' '%s' collides with "
				    "'%s' '%s'\n", anchor->path, anchor->name,
				    dup->path, dup->name);
				RB_REMOVE(pf_anchor_global, &pf_anchors,
				    anchor);
				free(anchor, M_TEMP);
				return (NULL);
			}
		}
		pf_init_ruleset(&anchor->ruleset);
		anchor->ruleset.anchor = anchor;
		parent = anchor;
		/* Advance to the next component, or terminate the loop. */
		if (r != NULL)
			q = r + 1;
		else
			*q = 0;
	}
	return (&anchor->ruleset);
}
554
/*
 * Garbage-collect an anchor's ruleset if it is completely unused: no
 * child anchors, no rule references, no tables, no open transactions
 * and no rules in any active or inactive queue.  After freeing an
 * anchor, walk up and try the parent, since removing a child may have
 * made the parent empty too.  The main ruleset is never removed.
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		/* Bail out if the ruleset is still referenced in any way. */
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		/* Unlink from the global tree and from the parent anchor. */
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		free(ruleset->anchor, M_TEMP);
		if (parent == NULL)
			return;
		/* The parent may now be empty as well; re-check it. */
		ruleset = &parent->ruleset;
	}
}
582
/*
 * Attach a rule to the anchor named by "name", interpreted relative to
 * ruleset "s" unless it starts with '/'.  Handles "../" components and
 * a trailing "/*" wildcard, creates missing anchors, and bumps the
 * target anchor's refcount.  Returns 0 on success, 1 on error.
 *
 * NOTE(review): `path` is a static MAXPATHLEN buffer (off-stack), so
 * this function is not re-entrant.
 */
int
pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
    const char *name)
{
	static char	*p, path[MAXPATHLEN];
	struct pf_ruleset *ruleset;

	r->anchor = NULL;
	r->anchor_relative = 0;
	r->anchor_wildcard = 0;
	/* Empty name: the rule calls no anchor. */
	if (!name[0])
		return (0);
	if (name[0] == '/')
		strlcpy(path, name + 1, sizeof(path));
	else {
		/* relative path */
		r->anchor_relative = 1;
		if (s->anchor == NULL || !s->anchor->path[0])
			path[0] = 0;
		else
			strlcpy(path, s->anchor->path, sizeof(path));
		/* Consume leading "../" components, walking up the tree. */
		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
			if (!path[0]) {
				printf("pf_anchor_setup: .. beyond root\n");
				return (1);
			}
			if ((p = strrchr(path, '/')) != NULL)
				*p = 0;
			else
				path[0] = 0;
			r->anchor_relative++;
			name += 3;
		}
		if (path[0])
			strlcat(path, "/", sizeof(path));
		strlcat(path, name, sizeof(path));
	}
	/* A trailing "/*" means "all children of this anchor". */
	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
		r->anchor_wildcard = 1;
		*p = 0;
	}
	ruleset = pf_find_or_create_ruleset(path);
	if (ruleset == NULL || ruleset->anchor == NULL) {
		printf("pf_anchor_setup: ruleset\n");
		return (1);
	}
	r->anchor = ruleset->anchor;
	r->anchor->refcnt++;
	return (0);
}
633
/*
 * Fill pr->anchor_call with the anchor call string of rule "r" as seen
 * from ruleset "rs": absolute ("/path") for non-relative anchors,
 * otherwise reconstructed with "../" components relative to rs, plus a
 * trailing "*" for wildcard calls.  Returns 0 on success, 1 when the
 * stored relative path is inconsistent with rs.
 */
int
pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
    struct pfioc_rule *pr)
{
	pr->anchor_call[0] = 0;
	if (r->anchor == NULL)
		return (0);
	if (!r->anchor_relative) {
		/* Absolute call: emit "/" + full anchor path. */
		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
		strlcat(pr->anchor_call, r->anchor->path,
		    sizeof(pr->anchor_call));
	} else {
		char	 a[MAXPATHLEN], b[MAXPATHLEN], *p;
		int	 i;

		/* a = caller's path, b = target anchor's path. */
		if (rs->anchor == NULL)
			a[0] = 0;
		else
			strlcpy(a, rs->anchor->path, sizeof(a));
		strlcpy(b, r->anchor->path, sizeof(b));
		/* Emit one "../" per level the rule stepped up. */
		for (i = 1; i < r->anchor_relative; ++i) {
			if ((p = strrchr(a, '/')) == NULL)
				p = a;
			*p = 0;
			strlcat(pr->anchor_call, "../",
			    sizeof(pr->anchor_call));
		}
		/* After stepping up, a must be a prefix of b. */
		if (strncmp(a, b, strlen(a))) {
			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
			return (1);
		}
		/* Append the part of b below the common prefix. */
		if (strlen(b) > strlen(a))
			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
			    sizeof(pr->anchor_call));
	}
	if (r->anchor_wildcard)
		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
		    sizeof(pr->anchor_call));
	return (0);
}
674
675 void
676 pf_anchor_remove(struct pf_rule *r)
677 {
678 if (r->anchor == NULL)
679 return;
680 if (r->anchor->refcnt <= 0) {
681 printf("pf_anchor_remove: broken refcount");
682 r->anchor = NULL;
683 return;
684 }
685 if (!--r->anchor->refcnt)
686 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
687 r->anchor = NULL;
688 }
689
690 void
691 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
692 {
693 struct pf_pooladdr *mv_pool_pa;
694
695 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
696 TAILQ_REMOVE(poola, mv_pool_pa, entries);
697 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
698 }
699 }
700
701 void
702 pf_empty_pool(struct pf_palist *poola)
703 {
704 struct pf_pooladdr *empty_pool_pa;
705
706 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
707 pfi_dynaddr_remove(&empty_pool_pa->addr);
708 pf_tbladdr_remove(&empty_pool_pa->addr);
709 pfi_detach_rule(empty_pool_pa->kif);
710 TAILQ_REMOVE(poola, empty_pool_pa, entries);
711 pool_put(&pf_pooladdr_pl, empty_pool_pa);
712 }
713 }
714
/*
 * Remove a rule from its queue (when rulequeue != NULL) and free it
 * once nothing references it any more.  A rule that still owns states
 * or source nodes is only unlinked (nr = -1, tqe_prev = NULL) and is
 * freed later when the last reference drains.  With rulequeue == NULL
 * the rule was already unlinked and only the release half runs.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* Mark as unlinked so a later call can finish the free. */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced (or merely unlinked above)?  Defer the free. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	/* Tables were already detached above when rulequeue != NULL. */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_detach_rule(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
760
/*
 * Return the numeric tag for "tagname", allocating a new id if the name
 * is unknown.  An existing entry just gains a reference.  New ids are
 * taken from the first gap in the (id-sorted) list, or appended at the
 * end; 0 is returned when no id <= TAGID_MAX is free or allocation
 * fails (0 is never a valid tag).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	/* Existing name: bump the refcount and reuse its id. */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* Walk while ids are consecutive; stop at the first gap. */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
805
806 static void
807 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
808 {
809 struct pf_tagname *tag;
810
811 TAILQ_FOREACH(tag, head, entries)
812 if (tag->tag == tagid) {
813 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
814 return;
815 }
816 }
817
818 static void
819 tag_unref(struct pf_tags *head, u_int16_t tag)
820 {
821 struct pf_tagname *p, *next;
822
823 if (tag == 0)
824 return;
825
826 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
827 next = TAILQ_NEXT(p, entries);
828 if (tag == p->tag) {
829 if (--p->ref == 0) {
830 TAILQ_REMOVE(head, p, entries);
831 free(p, M_TEMP);
832 }
833 break;
834 }
835 }
836 }
837
838 u_int16_t
839 pf_tagname2tag(char *tagname)
840 {
841 return (tagname2tag(&pf_tags, tagname));
842 }
843
844 void
845 pf_tag2tagname(u_int16_t tagid, char *p)
846 {
847 return (tag2tagname(&pf_tags, tagid, p));
848 }
849
850 void
851 pf_tag_ref(u_int16_t tag)
852 {
853 struct pf_tagname *t;
854
855 TAILQ_FOREACH(t, &pf_tags, entries)
856 if (t->tag == tag)
857 break;
858 if (t != NULL)
859 t->ref++;
860 }
861
862 void
863 pf_tag_unref(u_int16_t tag)
864 {
865 return (tag_unref(&pf_tags, tag));
866 }
867
/*
 * Resolve a route-label name to its id on OpenBSD; returns -1 when the
 * label cannot be resolved.  On other platforms route labels are not
 * supported and this is a no-op returning 0.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif
	return (0);
}
878
/*
 * Release the reference on a route label (OpenBSD only; no-op
 * elsewhere).
 */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif
}
887
/*
 * Convert a route-label id back to its name for copyout to userland
 * (OpenBSD only; no-op elsewhere).  "?" is substituted when the id no
 * longer resolves.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifdef __OpenBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif
}
904
905 #ifdef ALTQ
906 u_int32_t
907 pf_qname2qid(char *qname)
908 {
909 return ((u_int32_t)tagname2tag(&pf_qids, qname));
910 }
911
912 void
913 pf_qid2qname(u_int32_t qid, char *p)
914 {
915 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
916 }
917
918 void
919 pf_qid_unref(u_int32_t qid)
920 {
921 return (tag_unref(&pf_qids, (u_int16_t)qid));
922 }
923
924 int
925 pf_begin_altq(u_int32_t *ticket)
926 {
927 struct pf_altq *altq;
928 int error = 0;
929
930 /* Purge the old altq list */
931 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
932 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
933 if (altq->qname[0] == 0) {
934 /* detach and destroy the discipline */
935 error = altq_remove(altq);
936 } else
937 pf_qid_unref(altq->qid);
938 pool_put(&pf_altq_pl, altq);
939 }
940 if (error)
941 return (error);
942 *ticket = ++ticket_altqs_inactive;
943 altqs_inactive_open = 1;
944 return (0);
945 }
946
947 int
948 pf_rollback_altq(u_int32_t ticket)
949 {
950 struct pf_altq *altq;
951 int error = 0;
952
953 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
954 return (0);
955 /* Purge the old altq list */
956 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
957 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
958 if (altq->qname[0] == 0) {
959 /* detach and destroy the discipline */
960 error = altq_remove(altq);
961 } else
962 pf_qid_unref(altq->qid);
963 pool_put(&pf_altq_pl, altq);
964 }
965 altqs_inactive_open = 0;
966 return (error);
967 }
968
/*
 * Commit an ALTQ transaction: atomically (at splsoftnet) swap the
 * active and inactive queue lists, attach/enable the new disciplines,
 * then tear down the old ones.  Returns EBUSY on a ticket mismatch,
 * otherwise the first error from attach/detach.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			/* Keep the first error but finish the teardown. */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
1022
/*
 * Enable queueing on the interface named by "altq" and install its
 * token-bucket regulator.  Returns EINVAL when the interface does not
 * exist, otherwise the altq_enable()/tbr_set() error.
 */
int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	/* NOTE(review): the ifp != NULL test is redundant here — ifp was
	 * already checked (and dereferenced) above. */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
#ifdef __OpenBSD__
		s = splimp();
#else
		s = splnet();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1051
/*
 * Disable queueing on the interface named by "altq" and clear its
 * token-bucket regulator.  Returns EINVAL when the interface does not
 * exist and 0 when the discipline was already replaced by a newer one.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
#ifdef __OpenBSD__
		s = splimp();
#else
		s = splnet();
#endif
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
1085 #endif /* ALTQ */
1086
1087 int
1088 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1089 {
1090 struct pf_ruleset *rs;
1091 struct pf_rule *rule;
1092
1093 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1094 return (EINVAL);
1095 rs = pf_find_or_create_ruleset(anchor);
1096 if (rs == NULL)
1097 return (EINVAL);
1098 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1099 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1100 *ticket = ++rs->rules[rs_num].inactive.ticket;
1101 rs->rules[rs_num].inactive.open = 1;
1102 return (0);
1103 }
1104
1105 int
1106 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1107 {
1108 struct pf_ruleset *rs;
1109 struct pf_rule *rule;
1110
1111 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1112 return (EINVAL);
1113 rs = pf_find_ruleset(anchor);
1114 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1115 rs->rules[rs_num].inactive.ticket != ticket)
1116 return (0);
1117 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1118 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1119 rs->rules[rs_num].inactive.open = 0;
1120 return (0);
1121 }
1122
/*
 * Commit a rules transaction: at splsoftnet, swap the active and
 * inactive rule queues of the given ruleset type, recompute skip
 * steps, purge the replaced rules and close the transaction.
 * Returns EBUSY on a ticket mismatch, EINVAL on a bad ruleset number.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	/* The ruleset may now be empty; try to garbage-collect it. */
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
1156
1157 int
1158 pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
1159 {
1160 struct pf_pooladdr *pa = NULL;
1161 struct pf_pool *pool = NULL;
1162 int s;
1163 int error = 0;
1164
1165 /* XXX keep in sync with switch() below */
1166 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1167 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1168 switch (cmd) {
1169 case DIOCGETRULES:
1170 case DIOCGETRULE:
1171 case DIOCGETADDRS:
1172 case DIOCGETADDR:
1173 case DIOCGETSTATE:
1174 case DIOCSETSTATUSIF:
1175 case DIOCGETSTATUS:
1176 case DIOCCLRSTATUS:
1177 case DIOCNATLOOK:
1178 case DIOCSETDEBUG:
1179 case DIOCGETSTATES:
1180 case DIOCGETTIMEOUT:
1181 case DIOCCLRRULECTRS:
1182 case DIOCGETLIMIT:
1183 case DIOCGETALTQS:
1184 case DIOCGETALTQ:
1185 case DIOCGETQSTATS:
1186 case DIOCGETRULESETS:
1187 case DIOCGETRULESET:
1188 case DIOCRGETTABLES:
1189 case DIOCRGETTSTATS:
1190 case DIOCRCLRTSTATS:
1191 case DIOCRCLRADDRS:
1192 case DIOCRADDADDRS:
1193 case DIOCRDELADDRS:
1194 case DIOCRSETADDRS:
1195 case DIOCRGETADDRS:
1196 case DIOCRGETASTATS:
1197 case DIOCRCLRASTATS:
1198 case DIOCRTSTADDRS:
1199 case DIOCOSFPGET:
1200 case DIOCGETSRCNODES:
1201 case DIOCCLRSRCNODES:
1202 case DIOCIGETIFACES:
1203 case DIOCICLRISTATS:
1204 case DIOCSETIFFLAG:
1205 case DIOCCLRIFFLAG:
1206 break;
1207 case DIOCRCLRTABLES:
1208 case DIOCRADDTABLES:
1209 case DIOCRDELTABLES:
1210 case DIOCRSETTFLAGS:
1211 if (((struct pfioc_table *)addr)->pfrio_flags &
1212 PFR_FLAG_DUMMY)
1213 break; /* dummy operation ok */
1214 return (EPERM);
1215 default:
1216 return (EPERM);
1217 }
1218
1219 if (!(flags & FWRITE))
1220 switch (cmd) {
1221 case DIOCGETRULES:
1222 case DIOCGETRULE:
1223 case DIOCGETADDRS:
1224 case DIOCGETADDR:
1225 case DIOCGETSTATE:
1226 case DIOCGETSTATUS:
1227 case DIOCGETSTATES:
1228 case DIOCGETTIMEOUT:
1229 case DIOCGETLIMIT:
1230 case DIOCGETALTQS:
1231 case DIOCGETALTQ:
1232 case DIOCGETQSTATS:
1233 case DIOCGETRULESETS:
1234 case DIOCGETRULESET:
1235 case DIOCRGETTABLES:
1236 case DIOCRGETTSTATS:
1237 case DIOCRGETADDRS:
1238 case DIOCRGETASTATS:
1239 case DIOCRTSTADDRS:
1240 case DIOCOSFPGET:
1241 case DIOCGETSRCNODES:
1242 case DIOCIGETIFACES:
1243 break;
1244 case DIOCRCLRTABLES:
1245 case DIOCRADDTABLES:
1246 case DIOCRDELTABLES:
1247 case DIOCRCLRTSTATS:
1248 case DIOCRCLRADDRS:
1249 case DIOCRADDADDRS:
1250 case DIOCRDELADDRS:
1251 case DIOCRSETADDRS:
1252 case DIOCRSETTFLAGS:
1253 if (((struct pfioc_table *)addr)->pfrio_flags &
1254 PFR_FLAG_DUMMY)
1255 break; /* dummy operation ok */
1256 return (EACCES);
1257 default:
1258 return (EACCES);
1259 }
1260
1261 s = splsoftnet();
1262 switch (cmd) {
1263
1264 case DIOCSTART:
1265 if (pf_status.running)
1266 error = EEXIST;
1267 else {
1268 #ifdef __NetBSD__
1269 error = pf_pfil_attach();
1270 if (error)
1271 break;
1272 #endif
1273 pf_status.running = 1;
1274 pf_status.since = time_second;
1275 if (pf_status.stateid == 0) {
1276 pf_status.stateid = time_second;
1277 pf_status.stateid = pf_status.stateid << 32;
1278 }
1279 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1280 }
1281 break;
1282
1283 case DIOCSTOP:
1284 if (!pf_status.running)
1285 error = ENOENT;
1286 else {
1287 #ifdef __NetBSD__
1288 error = pf_pfil_detach();
1289 if (error)
1290 break;
1291 #endif
1292 pf_status.running = 0;
1293 pf_status.since = time_second;
1294 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1295 }
1296 break;
1297
1298 case DIOCADDRULE: {
1299 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1300 struct pf_ruleset *ruleset;
1301 struct pf_rule *rule, *tail;
1302 struct pf_pooladdr *pa;
1303 int rs_num;
1304
1305 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1306 ruleset = pf_find_ruleset(pr->anchor);
1307 if (ruleset == NULL) {
1308 error = EINVAL;
1309 break;
1310 }
1311 rs_num = pf_get_ruleset_number(pr->rule.action);
1312 if (rs_num >= PF_RULESET_MAX) {
1313 error = EINVAL;
1314 break;
1315 }
1316 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1317 error = EINVAL;
1318 break;
1319 }
1320 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1321 error = EBUSY;
1322 break;
1323 }
1324 if (pr->pool_ticket != ticket_pabuf) {
1325 error = EBUSY;
1326 break;
1327 }
1328 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1329 if (rule == NULL) {
1330 error = ENOMEM;
1331 break;
1332 }
1333 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1334 rule->anchor = NULL;
1335 rule->kif = NULL;
1336 TAILQ_INIT(&rule->rpool.list);
1337 /* initialize refcounting */
1338 rule->states = 0;
1339 rule->src_nodes = 0;
1340 rule->entries.tqe_prev = NULL;
1341 #ifndef INET
1342 if (rule->af == AF_INET) {
1343 pool_put(&pf_rule_pl, rule);
1344 error = EAFNOSUPPORT;
1345 break;
1346 }
1347 #endif /* INET */
1348 #ifndef INET6
1349 if (rule->af == AF_INET6) {
1350 pool_put(&pf_rule_pl, rule);
1351 error = EAFNOSUPPORT;
1352 break;
1353 }
1354 #endif /* INET6 */
1355 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1356 pf_rulequeue);
1357 if (tail)
1358 rule->nr = tail->nr + 1;
1359 else
1360 rule->nr = 0;
1361 if (rule->ifname[0]) {
1362 rule->kif = pfi_attach_rule(rule->ifname);
1363 if (rule->kif == NULL) {
1364 pool_put(&pf_rule_pl, rule);
1365 error = EINVAL;
1366 break;
1367 }
1368 }
1369
1370 #ifdef ALTQ
1371 /* set queue IDs */
1372 if (rule->qname[0] != 0) {
1373 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1374 error = EBUSY;
1375 else if (rule->pqname[0] != 0) {
1376 if ((rule->pqid =
1377 pf_qname2qid(rule->pqname)) == 0)
1378 error = EBUSY;
1379 } else
1380 rule->pqid = rule->qid;
1381 }
1382 #endif
1383 if (rule->tagname[0])
1384 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1385 error = EBUSY;
1386 if (rule->match_tagname[0])
1387 if ((rule->match_tag =
1388 pf_tagname2tag(rule->match_tagname)) == 0)
1389 error = EBUSY;
1390 if (rule->rt && !rule->direction)
1391 error = EINVAL;
1392 if (pf_rtlabel_add(&rule->src.addr) ||
1393 pf_rtlabel_add(&rule->dst.addr))
1394 error = EBUSY;
1395 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1396 error = EINVAL;
1397 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1398 error = EINVAL;
1399 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1400 error = EINVAL;
1401 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1402 error = EINVAL;
1403 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1404 error = EINVAL;
1405 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1406 if (pf_tbladdr_setup(ruleset, &pa->addr))
1407 error = EINVAL;
1408
1409 if (rule->overload_tblname[0]) {
1410 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1411 rule->overload_tblname)) == NULL)
1412 error = EINVAL;
1413 else
1414 rule->overload_tbl->pfrkt_flags |=
1415 PFR_TFLAG_ACTIVE;
1416 }
1417
1418 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1419 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1420 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1421 (rule->rt > PF_FASTROUTE)) &&
1422 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1423 error = EINVAL;
1424
1425 if (error) {
1426 pf_rm_rule(NULL, rule);
1427 break;
1428 }
1429 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1430 rule->evaluations = rule->packets = rule->bytes = 0;
1431 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1432 rule, entries);
1433 break;
1434 }
1435
1436 case DIOCGETRULES: {
1437 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1438 struct pf_ruleset *ruleset;
1439 struct pf_rule *tail;
1440 int rs_num;
1441
1442 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1443 ruleset = pf_find_ruleset(pr->anchor);
1444 if (ruleset == NULL) {
1445 error = EINVAL;
1446 break;
1447 }
1448 rs_num = pf_get_ruleset_number(pr->rule.action);
1449 if (rs_num >= PF_RULESET_MAX) {
1450 error = EINVAL;
1451 break;
1452 }
1453 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1454 pf_rulequeue);
1455 if (tail)
1456 pr->nr = tail->nr + 1;
1457 else
1458 pr->nr = 0;
1459 pr->ticket = ruleset->rules[rs_num].active.ticket;
1460 break;
1461 }
1462
1463 case DIOCGETRULE: {
1464 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1465 struct pf_ruleset *ruleset;
1466 struct pf_rule *rule;
1467 int rs_num, i;
1468
1469 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1470 ruleset = pf_find_ruleset(pr->anchor);
1471 if (ruleset == NULL) {
1472 error = EINVAL;
1473 break;
1474 }
1475 rs_num = pf_get_ruleset_number(pr->rule.action);
1476 if (rs_num >= PF_RULESET_MAX) {
1477 error = EINVAL;
1478 break;
1479 }
1480 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1481 error = EBUSY;
1482 break;
1483 }
1484 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1485 while ((rule != NULL) && (rule->nr != pr->nr))
1486 rule = TAILQ_NEXT(rule, entries);
1487 if (rule == NULL) {
1488 error = EBUSY;
1489 break;
1490 }
1491 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1492 if (pf_anchor_copyout(ruleset, rule, pr)) {
1493 error = EBUSY;
1494 break;
1495 }
1496 pfi_dynaddr_copyout(&pr->rule.src.addr);
1497 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1498 pf_tbladdr_copyout(&pr->rule.src.addr);
1499 pf_tbladdr_copyout(&pr->rule.dst.addr);
1500 pf_rtlabel_copyout(&pr->rule.src.addr);
1501 pf_rtlabel_copyout(&pr->rule.dst.addr);
1502 for (i = 0; i < PF_SKIP_COUNT; ++i)
1503 if (rule->skip[i].ptr == NULL)
1504 pr->rule.skip[i].nr = -1;
1505 else
1506 pr->rule.skip[i].nr =
1507 rule->skip[i].ptr->nr;
1508 break;
1509 }
1510
1511 case DIOCCHANGERULE: {
1512 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1513 struct pf_ruleset *ruleset;
1514 struct pf_rule *oldrule = NULL, *newrule = NULL;
1515 u_int32_t nr = 0;
1516 int rs_num;
1517
1518 if (!(pcr->action == PF_CHANGE_REMOVE ||
1519 pcr->action == PF_CHANGE_GET_TICKET) &&
1520 pcr->pool_ticket != ticket_pabuf) {
1521 error = EBUSY;
1522 break;
1523 }
1524
1525 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1526 pcr->action > PF_CHANGE_GET_TICKET) {
1527 error = EINVAL;
1528 break;
1529 }
1530 ruleset = pf_find_ruleset(pcr->anchor);
1531 if (ruleset == NULL) {
1532 error = EINVAL;
1533 break;
1534 }
1535 rs_num = pf_get_ruleset_number(pcr->rule.action);
1536 if (rs_num >= PF_RULESET_MAX) {
1537 error = EINVAL;
1538 break;
1539 }
1540
1541 if (pcr->action == PF_CHANGE_GET_TICKET) {
1542 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1543 break;
1544 } else {
1545 if (pcr->ticket !=
1546 ruleset->rules[rs_num].active.ticket) {
1547 error = EINVAL;
1548 break;
1549 }
1550 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1551 error = EINVAL;
1552 break;
1553 }
1554 }
1555
1556 if (pcr->action != PF_CHANGE_REMOVE) {
1557 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1558 if (newrule == NULL) {
1559 error = ENOMEM;
1560 break;
1561 }
1562 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1563 TAILQ_INIT(&newrule->rpool.list);
1564 /* initialize refcounting */
1565 newrule->states = 0;
1566 newrule->entries.tqe_prev = NULL;
1567 #ifndef INET
1568 if (newrule->af == AF_INET) {
1569 pool_put(&pf_rule_pl, newrule);
1570 error = EAFNOSUPPORT;
1571 break;
1572 }
1573 #endif /* INET */
1574 #ifndef INET6
1575 if (newrule->af == AF_INET6) {
1576 pool_put(&pf_rule_pl, newrule);
1577 error = EAFNOSUPPORT;
1578 break;
1579 }
1580 #endif /* INET6 */
1581 if (newrule->ifname[0]) {
1582 newrule->kif = pfi_attach_rule(newrule->ifname);
1583 if (newrule->kif == NULL) {
1584 pool_put(&pf_rule_pl, newrule);
1585 error = EINVAL;
1586 break;
1587 }
1588 } else
1589 newrule->kif = NULL;
1590
1591 #ifdef ALTQ
1592 /* set queue IDs */
1593 if (newrule->qname[0] != 0) {
1594 if ((newrule->qid =
1595 pf_qname2qid(newrule->qname)) == 0)
1596 error = EBUSY;
1597 else if (newrule->pqname[0] != 0) {
1598 if ((newrule->pqid =
1599 pf_qname2qid(newrule->pqname)) == 0)
1600 error = EBUSY;
1601 } else
1602 newrule->pqid = newrule->qid;
1603 }
1604 #endif /* ALTQ */
1605 if (newrule->tagname[0])
1606 if ((newrule->tag =
1607 pf_tagname2tag(newrule->tagname)) == 0)
1608 error = EBUSY;
1609 if (newrule->match_tagname[0])
1610 if ((newrule->match_tag = pf_tagname2tag(
1611 newrule->match_tagname)) == 0)
1612 error = EBUSY;
1613 if (newrule->rt && !newrule->direction)
1614 error = EINVAL;
1615 if (pf_rtlabel_add(&newrule->src.addr) ||
1616 pf_rtlabel_add(&newrule->dst.addr))
1617 error = EBUSY;
1618 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1619 error = EINVAL;
1620 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1621 error = EINVAL;
1622 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1623 error = EINVAL;
1624 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1625 error = EINVAL;
1626 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1627 error = EINVAL;
1628
1629 if (newrule->overload_tblname[0]) {
1630 if ((newrule->overload_tbl = pfr_attach_table(
1631 ruleset, newrule->overload_tblname)) ==
1632 NULL)
1633 error = EINVAL;
1634 else
1635 newrule->overload_tbl->pfrkt_flags |=
1636 PFR_TFLAG_ACTIVE;
1637 }
1638
1639 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1640 if (((((newrule->action == PF_NAT) ||
1641 (newrule->action == PF_RDR) ||
1642 (newrule->action == PF_BINAT) ||
1643 (newrule->rt > PF_FASTROUTE)) &&
1644 !pcr->anchor[0])) &&
1645 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1646 error = EINVAL;
1647
1648 if (error) {
1649 pf_rm_rule(NULL, newrule);
1650 break;
1651 }
1652 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1653 newrule->evaluations = newrule->packets = 0;
1654 newrule->bytes = 0;
1655 }
1656 pf_empty_pool(&pf_pabuf);
1657
1658 if (pcr->action == PF_CHANGE_ADD_HEAD)
1659 oldrule = TAILQ_FIRST(
1660 ruleset->rules[rs_num].active.ptr);
1661 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1662 oldrule = TAILQ_LAST(
1663 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1664 else {
1665 oldrule = TAILQ_FIRST(
1666 ruleset->rules[rs_num].active.ptr);
1667 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1668 oldrule = TAILQ_NEXT(oldrule, entries);
1669 if (oldrule == NULL) {
1670 if (newrule != NULL)
1671 pf_rm_rule(NULL, newrule);
1672 error = EINVAL;
1673 break;
1674 }
1675 }
1676
1677 if (pcr->action == PF_CHANGE_REMOVE)
1678 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1679 else {
1680 if (oldrule == NULL)
1681 TAILQ_INSERT_TAIL(
1682 ruleset->rules[rs_num].active.ptr,
1683 newrule, entries);
1684 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1685 pcr->action == PF_CHANGE_ADD_BEFORE)
1686 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1687 else
1688 TAILQ_INSERT_AFTER(
1689 ruleset->rules[rs_num].active.ptr,
1690 oldrule, newrule, entries);
1691 }
1692
1693 nr = 0;
1694 TAILQ_FOREACH(oldrule,
1695 ruleset->rules[rs_num].active.ptr, entries)
1696 oldrule->nr = nr++;
1697
1698 ruleset->rules[rs_num].active.ticket++;
1699
1700 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1701 pf_remove_if_empty_ruleset(ruleset);
1702
1703 break;
1704 }
1705
1706 case DIOCCLRSTATES: {
1707 struct pf_state *state;
1708 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1709 int killed = 0;
1710
1711 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1712 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1713 state->u.s.kif->pfik_name)) {
1714 state->timeout = PFTM_PURGE;
1715 #if NPFSYNC
1716 /* don't send out individual delete messages */
1717 state->sync_flags = PFSTATE_NOSYNC;
1718 #endif
1719 killed++;
1720 }
1721 }
1722 pf_purge_expired_states();
1723 pf_status.states = 0;
1724 psk->psk_af = killed;
1725 #if NPFSYNC
1726 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1727 #endif
1728 break;
1729 }
1730
1731 case DIOCKILLSTATES: {
1732 struct pf_state *state;
1733 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1734 int killed = 0;
1735
1736 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1737 if ((!psk->psk_af || state->af == psk->psk_af)
1738 && (!psk->psk_proto || psk->psk_proto ==
1739 state->proto) &&
1740 PF_MATCHA(psk->psk_src.neg,
1741 &psk->psk_src.addr.v.a.addr,
1742 &psk->psk_src.addr.v.a.mask,
1743 &state->lan.addr, state->af) &&
1744 PF_MATCHA(psk->psk_dst.neg,
1745 &psk->psk_dst.addr.v.a.addr,
1746 &psk->psk_dst.addr.v.a.mask,
1747 &state->ext.addr, state->af) &&
1748 (psk->psk_src.port_op == 0 ||
1749 pf_match_port(psk->psk_src.port_op,
1750 psk->psk_src.port[0], psk->psk_src.port[1],
1751 state->lan.port)) &&
1752 (psk->psk_dst.port_op == 0 ||
1753 pf_match_port(psk->psk_dst.port_op,
1754 psk->psk_dst.port[0], psk->psk_dst.port[1],
1755 state->ext.port)) &&
1756 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1757 state->u.s.kif->pfik_name))) {
1758 state->timeout = PFTM_PURGE;
1759 killed++;
1760 }
1761 }
1762 pf_purge_expired_states();
1763 psk->psk_af = killed;
1764 break;
1765 }
1766
1767 case DIOCADDSTATE: {
1768 struct pfioc_state *ps = (struct pfioc_state *)addr;
1769 struct pf_state *state;
1770 struct pfi_kif *kif;
1771
1772 if (ps->state.timeout >= PFTM_MAX &&
1773 ps->state.timeout != PFTM_UNTIL_PACKET) {
1774 error = EINVAL;
1775 break;
1776 }
1777 state = pool_get(&pf_state_pl, PR_NOWAIT);
1778 if (state == NULL) {
1779 error = ENOMEM;
1780 break;
1781 }
1782 kif = pfi_lookup_create(ps->state.u.ifname);
1783 if (kif == NULL) {
1784 pool_put(&pf_state_pl, state);
1785 error = ENOENT;
1786 break;
1787 }
1788 bcopy(&ps->state, state, sizeof(struct pf_state));
1789 bzero(&state->u, sizeof(state->u));
1790 state->rule.ptr = &pf_default_rule;
1791 state->nat_rule.ptr = NULL;
1792 state->anchor.ptr = NULL;
1793 state->rt_kif = NULL;
1794 state->creation = time_second;
1795 state->pfsync_time = 0;
1796 state->packets[0] = state->packets[1] = 0;
1797 state->bytes[0] = state->bytes[1] = 0;
1798
1799 if (pf_insert_state(kif, state)) {
1800 pfi_maybe_destroy(kif);
1801 pool_put(&pf_state_pl, state);
1802 error = ENOMEM;
1803 }
1804 break;
1805 }
1806
1807 case DIOCGETSTATE: {
1808 struct pfioc_state *ps = (struct pfioc_state *)addr;
1809 struct pf_state *state;
1810 u_int32_t nr;
1811
1812 nr = 0;
1813 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1814 if (nr >= ps->nr)
1815 break;
1816 nr++;
1817 }
1818 if (state == NULL) {
1819 error = EBUSY;
1820 break;
1821 }
1822 bcopy(state, &ps->state, sizeof(struct pf_state));
1823 ps->state.rule.nr = state->rule.ptr->nr;
1824 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1825 -1 : state->nat_rule.ptr->nr;
1826 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1827 -1 : state->anchor.ptr->nr;
1828 ps->state.expire = pf_state_expires(state);
1829 if (ps->state.expire > time_second)
1830 ps->state.expire -= time_second;
1831 else
1832 ps->state.expire = 0;
1833 break;
1834 }
1835
1836 case DIOCGETSTATES: {
1837 struct pfioc_states *ps = (struct pfioc_states *)addr;
1838 struct pf_state *state;
1839 struct pf_state *p, pstore;
1840 struct pfi_kif *kif;
1841 u_int32_t nr = 0;
1842 int space = ps->ps_len;
1843
1844 if (space == 0) {
1845 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1846 nr += kif->pfik_states;
1847 ps->ps_len = sizeof(struct pf_state) * nr;
1848 break;
1849 }
1850
1851 p = ps->ps_states;
1852 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1853 RB_FOREACH(state, pf_state_tree_ext_gwy,
1854 &kif->pfik_ext_gwy) {
1855 int secs = time_second;
1856
1857 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1858 break;
1859
1860 bcopy(state, &pstore, sizeof(pstore));
1861 strlcpy(pstore.u.ifname, kif->pfik_name,
1862 sizeof(pstore.u.ifname));
1863 pstore.rule.nr = state->rule.ptr->nr;
1864 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1865 NULL) ? -1 : state->nat_rule.ptr->nr;
1866 pstore.anchor.nr = (state->anchor.ptr ==
1867 NULL) ? -1 : state->anchor.ptr->nr;
1868 pstore.creation = secs - pstore.creation;
1869 pstore.expire = pf_state_expires(state);
1870 if (pstore.expire > secs)
1871 pstore.expire -= secs;
1872 else
1873 pstore.expire = 0;
1874 error = copyout(&pstore, p, sizeof(*p));
1875 if (error)
1876 goto fail;
1877 p++;
1878 nr++;
1879 }
1880 ps->ps_len = sizeof(struct pf_state) * nr;
1881 break;
1882 }
1883
1884 case DIOCGETSTATUS: {
1885 struct pf_status *s = (struct pf_status *)addr;
1886 bcopy(&pf_status, s, sizeof(struct pf_status));
1887 pfi_fill_oldstatus(s);
1888 break;
1889 }
1890
1891 case DIOCSETSTATUSIF: {
1892 struct pfioc_if *pi = (struct pfioc_if *)addr;
1893
1894 if (pi->ifname[0] == 0) {
1895 bzero(pf_status.ifname, IFNAMSIZ);
1896 break;
1897 }
1898 if (ifunit(pi->ifname) == NULL) {
1899 error = EINVAL;
1900 break;
1901 }
1902 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1903 break;
1904 }
1905
1906 case DIOCCLRSTATUS: {
1907 bzero(pf_status.counters, sizeof(pf_status.counters));
1908 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1909 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1910 if (*pf_status.ifname)
1911 pfi_clr_istats(pf_status.ifname, NULL,
1912 PFI_FLAG_INSTANCE);
1913 break;
1914 }
1915
1916 case DIOCNATLOOK: {
1917 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1918 struct pf_state *state;
1919 struct pf_state key;
1920 int m = 0, direction = pnl->direction;
1921
1922 key.af = pnl->af;
1923 key.proto = pnl->proto;
1924
1925 if (!pnl->proto ||
1926 PF_AZERO(&pnl->saddr, pnl->af) ||
1927 PF_AZERO(&pnl->daddr, pnl->af) ||
1928 !pnl->dport || !pnl->sport)
1929 error = EINVAL;
1930 else {
1931 /*
1932 * userland gives us source and dest of connection,
1933 * reverse the lookup so we ask for what happens with
1934 * the return traffic, enabling us to find it in the
1935 * state tree.
1936 */
1937 if (direction == PF_IN) {
1938 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1939 key.ext.port = pnl->dport;
1940 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1941 key.gwy.port = pnl->sport;
1942 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1943 } else {
1944 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1945 key.lan.port = pnl->dport;
1946 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1947 key.ext.port = pnl->sport;
1948 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1949 }
1950 if (m > 1)
1951 error = E2BIG; /* more than one state */
1952 else if (state != NULL) {
1953 if (direction == PF_IN) {
1954 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1955 state->af);
1956 pnl->rsport = state->lan.port;
1957 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1958 pnl->af);
1959 pnl->rdport = pnl->dport;
1960 } else {
1961 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1962 state->af);
1963 pnl->rdport = state->gwy.port;
1964 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1965 pnl->af);
1966 pnl->rsport = pnl->sport;
1967 }
1968 } else
1969 error = ENOENT;
1970 }
1971 break;
1972 }
1973
1974 case DIOCSETTIMEOUT: {
1975 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1976 int old;
1977
1978 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1979 pt->seconds < 0) {
1980 error = EINVAL;
1981 goto fail;
1982 }
1983 old = pf_default_rule.timeout[pt->timeout];
1984 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1985 pt->seconds = old;
1986 break;
1987 }
1988
1989 case DIOCGETTIMEOUT: {
1990 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1991
1992 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1993 error = EINVAL;
1994 goto fail;
1995 }
1996 pt->seconds = pf_default_rule.timeout[pt->timeout];
1997 break;
1998 }
1999
2000 case DIOCGETLIMIT: {
2001 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2002
2003 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2004 error = EINVAL;
2005 goto fail;
2006 }
2007 pl->limit = pf_pool_limits[pl->index].limit;
2008 break;
2009 }
2010
2011 case DIOCSETLIMIT: {
2012 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2013 int old_limit;
2014
2015 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2016 pf_pool_limits[pl->index].pp == NULL) {
2017 error = EINVAL;
2018 goto fail;
2019 }
2020 #ifdef __OpenBSD__
2021 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2022 pl->limit, NULL, 0) != 0) {
2023 error = EBUSY;
2024 goto fail;
2025 }
2026 #else
2027 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2028 pl->limit, NULL, 0);
2029 #endif
2030 old_limit = pf_pool_limits[pl->index].limit;
2031 pf_pool_limits[pl->index].limit = pl->limit;
2032 pl->limit = old_limit;
2033 break;
2034 }
2035
2036 case DIOCSETDEBUG: {
2037 u_int32_t *level = (u_int32_t *)addr;
2038
2039 pf_status.debug = *level;
2040 break;
2041 }
2042
2043 case DIOCCLRRULECTRS: {
2044 struct pf_ruleset *ruleset = &pf_main_ruleset;
2045 struct pf_rule *rule;
2046
2047 TAILQ_FOREACH(rule,
2048 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2049 rule->evaluations = rule->packets =
2050 rule->bytes = 0;
2051 break;
2052 }
2053
2054 #ifdef ALTQ
2055 case DIOCSTARTALTQ: {
2056 struct pf_altq *altq;
2057
2058 /* enable all altq interfaces on active list */
2059 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2060 if (altq->qname[0] == 0) {
2061 error = pf_enable_altq(altq);
2062 if (error != 0)
2063 break;
2064 }
2065 }
2066 if (error == 0)
2067 pf_altq_running = 1;
2068 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2069 break;
2070 }
2071
2072 case DIOCSTOPALTQ: {
2073 struct pf_altq *altq;
2074
2075 /* disable all altq interfaces on active list */
2076 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2077 if (altq->qname[0] == 0) {
2078 error = pf_disable_altq(altq);
2079 if (error != 0)
2080 break;
2081 }
2082 }
2083 if (error == 0)
2084 pf_altq_running = 0;
2085 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2086 break;
2087 }
2088
2089 case DIOCADDALTQ: {
2090 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2091 struct pf_altq *altq, *a;
2092
2093 if (pa->ticket != ticket_altqs_inactive) {
2094 error = EBUSY;
2095 break;
2096 }
2097 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2098 if (altq == NULL) {
2099 error = ENOMEM;
2100 break;
2101 }
2102 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2103
2104 /*
2105 * if this is for a queue, find the discipline and
2106 * copy the necessary fields
2107 */
2108 if (altq->qname[0] != 0) {
2109 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2110 error = EBUSY;
2111 pool_put(&pf_altq_pl, altq);
2112 break;
2113 }
2114 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2115 if (strncmp(a->ifname, altq->ifname,
2116 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2117 altq->altq_disc = a->altq_disc;
2118 break;
2119 }
2120 }
2121 }
2122
2123 error = altq_add(altq);
2124 if (error) {
2125 pool_put(&pf_altq_pl, altq);
2126 break;
2127 }
2128
2129 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2130 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2131 break;
2132 }
2133
2134 case DIOCGETALTQS: {
2135 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2136 struct pf_altq *altq;
2137
2138 pa->nr = 0;
2139 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2140 pa->nr++;
2141 pa->ticket = ticket_altqs_active;
2142 break;
2143 }
2144
2145 case DIOCGETALTQ: {
2146 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2147 struct pf_altq *altq;
2148 u_int32_t nr;
2149
2150 if (pa->ticket != ticket_altqs_active) {
2151 error = EBUSY;
2152 break;
2153 }
2154 nr = 0;
2155 altq = TAILQ_FIRST(pf_altqs_active);
2156 while ((altq != NULL) && (nr < pa->nr)) {
2157 altq = TAILQ_NEXT(altq, entries);
2158 nr++;
2159 }
2160 if (altq == NULL) {
2161 error = EBUSY;
2162 break;
2163 }
2164 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2165 break;
2166 }
2167
2168 case DIOCCHANGEALTQ:
2169 /* CHANGEALTQ not supported yet! */
2170 error = ENODEV;
2171 break;
2172
2173 case DIOCGETQSTATS: {
2174 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2175 struct pf_altq *altq;
2176 u_int32_t nr;
2177 int nbytes;
2178
2179 if (pq->ticket != ticket_altqs_active) {
2180 error = EBUSY;
2181 break;
2182 }
2183 nbytes = pq->nbytes;
2184 nr = 0;
2185 altq = TAILQ_FIRST(pf_altqs_active);
2186 while ((altq != NULL) && (nr < pq->nr)) {
2187 altq = TAILQ_NEXT(altq, entries);
2188 nr++;
2189 }
2190 if (altq == NULL) {
2191 error = EBUSY;
2192 break;
2193 }
2194 error = altq_getqstats(altq, pq->buf, &nbytes);
2195 if (error == 0) {
2196 pq->scheduler = altq->scheduler;
2197 pq->nbytes = nbytes;
2198 }
2199 break;
2200 }
2201 #endif /* ALTQ */
2202
2203 case DIOCBEGINADDRS: {
2204 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2205
2206 pf_empty_pool(&pf_pabuf);
2207 pp->ticket = ++ticket_pabuf;
2208 break;
2209 }
2210
2211 case DIOCADDADDR: {
2212 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2213
2214 #ifndef INET
2215 if (pp->af == AF_INET) {
2216 error = EAFNOSUPPORT;
2217 break;
2218 }
2219 #endif /* INET */
2220 #ifndef INET6
2221 if (pp->af == AF_INET6) {
2222 error = EAFNOSUPPORT;
2223 break;
2224 }
2225 #endif /* INET6 */
2226 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2227 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2228 pp->addr.addr.type != PF_ADDR_TABLE) {
2229 error = EINVAL;
2230 break;
2231 }
2232 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2233 if (pa == NULL) {
2234 error = ENOMEM;
2235 break;
2236 }
2237 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2238 if (pa->ifname[0]) {
2239 pa->kif = pfi_attach_rule(pa->ifname);
2240 if (pa->kif == NULL) {
2241 pool_put(&pf_pooladdr_pl, pa);
2242 error = EINVAL;
2243 break;
2244 }
2245 }
2246 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2247 pfi_dynaddr_remove(&pa->addr);
2248 pfi_detach_rule(pa->kif);
2249 pool_put(&pf_pooladdr_pl, pa);
2250 error = EINVAL;
2251 break;
2252 }
2253 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2254 break;
2255 }
2256
2257 case DIOCGETADDRS: {
2258 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2259
2260 pp->nr = 0;
2261 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2262 pp->r_num, 0, 1, 0);
2263 if (pool == NULL) {
2264 error = EBUSY;
2265 break;
2266 }
2267 TAILQ_FOREACH(pa, &pool->list, entries)
2268 pp->nr++;
2269 break;
2270 }
2271
2272 case DIOCGETADDR: {
2273 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2274 u_int32_t nr = 0;
2275
2276 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2277 pp->r_num, 0, 1, 1);
2278 if (pool == NULL) {
2279 error = EBUSY;
2280 break;
2281 }
2282 pa = TAILQ_FIRST(&pool->list);
2283 while ((pa != NULL) && (nr < pp->nr)) {
2284 pa = TAILQ_NEXT(pa, entries);
2285 nr++;
2286 }
2287 if (pa == NULL) {
2288 error = EBUSY;
2289 break;
2290 }
2291 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2292 pfi_dynaddr_copyout(&pp->addr.addr);
2293 pf_tbladdr_copyout(&pp->addr.addr);
2294 pf_rtlabel_copyout(&pp->addr.addr);
2295 break;
2296 }
2297
2298 case DIOCCHANGEADDR: {
2299 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2300 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2301 struct pf_ruleset *ruleset;
2302
2303 if (pca->action < PF_CHANGE_ADD_HEAD ||
2304 pca->action > PF_CHANGE_REMOVE) {
2305 error = EINVAL;
2306 break;
2307 }
2308 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2309 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2310 pca->addr.addr.type != PF_ADDR_TABLE) {
2311 error = EINVAL;
2312 break;
2313 }
2314
2315 ruleset = pf_find_ruleset(pca->anchor);
2316 if (ruleset == NULL) {
2317 error = EBUSY;
2318 break;
2319 }
2320 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2321 pca->r_num, pca->r_last, 1, 1);
2322 if (pool == NULL) {
2323 error = EBUSY;
2324 break;
2325 }
2326 if (pca->action != PF_CHANGE_REMOVE) {
2327 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2328 if (newpa == NULL) {
2329 error = ENOMEM;
2330 break;
2331 }
2332 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2333 #ifndef INET
2334 if (pca->af == AF_INET) {
2335 pool_put(&pf_pooladdr_pl, newpa);
2336 error = EAFNOSUPPORT;
2337 break;
2338 }
2339 #endif /* INET */
2340 #ifndef INET6
2341 if (pca->af == AF_INET6) {
2342 pool_put(&pf_pooladdr_pl, newpa);
2343 error = EAFNOSUPPORT;
2344 break;
2345 }
2346 #endif /* INET6 */
2347 if (newpa->ifname[0]) {
2348 newpa->kif = pfi_attach_rule(newpa->ifname);
2349 if (newpa->kif == NULL) {
2350 pool_put(&pf_pooladdr_pl, newpa);
2351 error = EINVAL;
2352 break;
2353 }
2354 } else
2355 newpa->kif = NULL;
2356 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2357 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2358 pfi_dynaddr_remove(&newpa->addr);
2359 pfi_detach_rule(newpa->kif);
2360 pool_put(&pf_pooladdr_pl, newpa);
2361 error = EINVAL;
2362 break;
2363 }
2364 }
2365
2366 if (pca->action == PF_CHANGE_ADD_HEAD)
2367 oldpa = TAILQ_FIRST(&pool->list);
2368 else if (pca->action == PF_CHANGE_ADD_TAIL)
2369 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2370 else {
2371 int i = 0;
2372
2373 oldpa = TAILQ_FIRST(&pool->list);
2374 while ((oldpa != NULL) && (i < pca->nr)) {
2375 oldpa = TAILQ_NEXT(oldpa, entries);
2376 i++;
2377 }
2378 if (oldpa == NULL) {
2379 error = EINVAL;
2380 break;
2381 }
2382 }
2383
2384 if (pca->action == PF_CHANGE_REMOVE) {
2385 TAILQ_REMOVE(&pool->list, oldpa, entries);
2386 pfi_dynaddr_remove(&oldpa->addr);
2387 pf_tbladdr_remove(&oldpa->addr);
2388 pfi_detach_rule(oldpa->kif);
2389 pool_put(&pf_pooladdr_pl, oldpa);
2390 } else {
2391 if (oldpa == NULL)
2392 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2393 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2394 pca->action == PF_CHANGE_ADD_BEFORE)
2395 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2396 else
2397 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2398 newpa, entries);
2399 }
2400
2401 pool->cur = TAILQ_FIRST(&pool->list);
2402 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2403 pca->af);
2404 break;
2405 }
2406
2407 case DIOCGETRULESETS: {
2408 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2409 struct pf_ruleset *ruleset;
2410 struct pf_anchor *anchor;
2411
2412 pr->path[sizeof(pr->path) - 1] = 0;
2413 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2414 error = EINVAL;
2415 break;
2416 }
2417 pr->nr = 0;
2418 if (ruleset->anchor == NULL) {
2419 /* XXX kludge for pf_main_ruleset */
2420 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2421 if (anchor->parent == NULL)
2422 pr->nr++;
2423 } else {
2424 RB_FOREACH(anchor, pf_anchor_node,
2425 &ruleset->anchor->children)
2426 pr->nr++;
2427 }
2428 break;
2429 }
2430
2431 case DIOCGETRULESET: {
2432 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2433 struct pf_ruleset *ruleset;
2434 struct pf_anchor *anchor;
2435 u_int32_t nr = 0;
2436
2437 pr->path[sizeof(pr->path) - 1] = 0;
2438 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2439 error = EINVAL;
2440 break;
2441 }
2442 pr->name[0] = 0;
2443 if (ruleset->anchor == NULL) {
2444 /* XXX kludge for pf_main_ruleset */
2445 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2446 if (anchor->parent == NULL && nr++ == pr->nr) {
2447 strlcpy(pr->name, anchor->name,
2448 sizeof(pr->name));
2449 break;
2450 }
2451 } else {
2452 RB_FOREACH(anchor, pf_anchor_node,
2453 &ruleset->anchor->children)
2454 if (nr++ == pr->nr) {
2455 strlcpy(pr->name, anchor->name,
2456 sizeof(pr->name));
2457 break;
2458 }
2459 }
2460 if (!pr->name[0])
2461 error = EBUSY;
2462 break;
2463 }
2464
2465 case DIOCRCLRTABLES: {
2466 struct pfioc_table *io = (struct pfioc_table *)addr;
2467
2468 if (io->pfrio_esize != 0) {
2469 error = ENODEV;
2470 break;
2471 }
2472 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2473 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2474 break;
2475 }
2476
2477 case DIOCRADDTABLES: {
2478 struct pfioc_table *io = (struct pfioc_table *)addr;
2479
2480 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2481 error = ENODEV;
2482 break;
2483 }
2484 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2485 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2486 break;
2487 }
2488
2489 case DIOCRDELTABLES: {
2490 struct pfioc_table *io = (struct pfioc_table *)addr;
2491
2492 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2493 error = ENODEV;
2494 break;
2495 }
2496 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2497 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2498 break;
2499 }
2500
2501 case DIOCRGETTABLES: {
2502 struct pfioc_table *io = (struct pfioc_table *)addr;
2503
2504 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2505 error = ENODEV;
2506 break;
2507 }
2508 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2509 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2510 break;
2511 }
2512
2513 case DIOCRGETTSTATS: {
2514 struct pfioc_table *io = (struct pfioc_table *)addr;
2515
2516 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2517 error = ENODEV;
2518 break;
2519 }
2520 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2521 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2522 break;
2523 }
2524
2525 case DIOCRCLRTSTATS: {
2526 struct pfioc_table *io = (struct pfioc_table *)addr;
2527
2528 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2529 error = ENODEV;
2530 break;
2531 }
2532 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2533 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2534 break;
2535 }
2536
2537 case DIOCRSETTFLAGS: {
2538 struct pfioc_table *io = (struct pfioc_table *)addr;
2539
2540 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2541 error = ENODEV;
2542 break;
2543 }
2544 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2545 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2546 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2547 break;
2548 }
2549
2550 case DIOCRCLRADDRS: {
2551 struct pfioc_table *io = (struct pfioc_table *)addr;
2552
2553 if (io->pfrio_esize != 0) {
2554 error = ENODEV;
2555 break;
2556 }
2557 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2558 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2559 break;
2560 }
2561
2562 case DIOCRADDADDRS: {
2563 struct pfioc_table *io = (struct pfioc_table *)addr;
2564
2565 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2566 error = ENODEV;
2567 break;
2568 }
2569 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2570 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2571 PFR_FLAG_USERIOCTL);
2572 break;
2573 }
2574
2575 case DIOCRDELADDRS: {
2576 struct pfioc_table *io = (struct pfioc_table *)addr;
2577
2578 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2579 error = ENODEV;
2580 break;
2581 }
2582 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2583 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2584 PFR_FLAG_USERIOCTL);
2585 break;
2586 }
2587
2588 case DIOCRSETADDRS: {
2589 struct pfioc_table *io = (struct pfioc_table *)addr;
2590
2591 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2592 error = ENODEV;
2593 break;
2594 }
2595 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2596 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2597 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2598 PFR_FLAG_USERIOCTL);
2599 break;
2600 }
2601
2602 case DIOCRGETADDRS: {
2603 struct pfioc_table *io = (struct pfioc_table *)addr;
2604
2605 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2606 error = ENODEV;
2607 break;
2608 }
2609 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2610 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2611 break;
2612 }
2613
2614 case DIOCRGETASTATS: {
2615 struct pfioc_table *io = (struct pfioc_table *)addr;
2616
2617 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2618 error = ENODEV;
2619 break;
2620 }
2621 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2622 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2623 break;
2624 }
2625
2626 case DIOCRCLRASTATS: {
2627 struct pfioc_table *io = (struct pfioc_table *)addr;
2628
2629 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2630 error = ENODEV;
2631 break;
2632 }
2633 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2634 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2635 PFR_FLAG_USERIOCTL);
2636 break;
2637 }
2638
2639 case DIOCRTSTADDRS: {
2640 struct pfioc_table *io = (struct pfioc_table *)addr;
2641
2642 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2643 error = ENODEV;
2644 break;
2645 }
2646 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2647 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2648 PFR_FLAG_USERIOCTL);
2649 break;
2650 }
2651
2652 case DIOCRINADEFINE: {
2653 struct pfioc_table *io = (struct pfioc_table *)addr;
2654
2655 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2656 error = ENODEV;
2657 break;
2658 }
2659 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2660 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2661 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2662 break;
2663 }
2664
2665 case DIOCOSFPADD: {
2666 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2667 error = pf_osfp_add(io);
2668 break;
2669 }
2670
2671 case DIOCOSFPGET: {
2672 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2673 error = pf_osfp_get(io);
2674 break;
2675 }
2676
2677 case DIOCXBEGIN: {
2678 struct pfioc_trans *io = (struct pfioc_trans *)
2679 addr;
2680 static struct pfioc_trans_e ioe;
2681 static struct pfr_table table;
2682 int i;
2683
2684 if (io->esize != sizeof(ioe)) {
2685 error = ENODEV;
2686 goto fail;
2687 }
2688 for (i = 0; i < io->size; i++) {
2689 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2690 error = EFAULT;
2691 goto fail;
2692 }
2693 switch (ioe.rs_num) {
2694 #ifdef ALTQ
2695 case PF_RULESET_ALTQ:
2696 if (ioe.anchor[0]) {
2697 error = EINVAL;
2698 goto fail;
2699 }
2700 if ((error = pf_begin_altq(&ioe.ticket)))
2701 goto fail;
2702 break;
2703 #endif /* ALTQ */
2704 case PF_RULESET_TABLE:
2705 bzero(&table, sizeof(table));
2706 strlcpy(table.pfrt_anchor, ioe.anchor,
2707 sizeof(table.pfrt_anchor));
2708 if ((error = pfr_ina_begin(&table,
2709 &ioe.ticket, NULL, 0)))
2710 goto fail;
2711 break;
2712 default:
2713 if ((error = pf_begin_rules(&ioe.ticket,
2714 ioe.rs_num, ioe.anchor)))
2715 goto fail;
2716 break;
2717 }
2718 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2719 error = EFAULT;
2720 goto fail;
2721 }
2722 }
2723 break;
2724 }
2725
2726 case DIOCXROLLBACK: {
2727 struct pfioc_trans *io = (struct pfioc_trans *)
2728 addr;
2729 static struct pfioc_trans_e ioe;
2730 static struct pfr_table table;
2731 int i;
2732
2733 if (io->esize != sizeof(ioe)) {
2734 error = ENODEV;
2735 goto fail;
2736 }
2737 for (i = 0; i < io->size; i++) {
2738 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2739 error = EFAULT;
2740 goto fail;
2741 }
2742 switch (ioe.rs_num) {
2743 #ifdef ALTQ
2744 case PF_RULESET_ALTQ:
2745 if (ioe.anchor[0]) {
2746 error = EINVAL;
2747 goto fail;
2748 }
2749 if ((error = pf_rollback_altq(ioe.ticket)))
2750 goto fail; /* really bad */
2751 break;
2752 #endif /* ALTQ */
2753 case PF_RULESET_TABLE:
2754 bzero(&table, sizeof(table));
2755 strlcpy(table.pfrt_anchor, ioe.anchor,
2756 sizeof(table.pfrt_anchor));
2757 if ((error = pfr_ina_rollback(&table,
2758 ioe.ticket, NULL, 0)))
2759 goto fail; /* really bad */
2760 break;
2761 default:
2762 if ((error = pf_rollback_rules(ioe.ticket,
2763 ioe.rs_num, ioe.anchor)))
2764 goto fail; /* really bad */
2765 break;
2766 }
2767 }
2768 break;
2769 }
2770
2771 case DIOCXCOMMIT: {
2772 struct pfioc_trans *io = (struct pfioc_trans *)
2773 addr;
2774 static struct pfioc_trans_e ioe;
2775 static struct pfr_table table;
2776 struct pf_ruleset *rs;
2777 int i;
2778
2779 if (io->esize != sizeof(ioe)) {
2780 error = ENODEV;
2781 goto fail;
2782 }
2783 /* first makes sure everything will succeed */
2784 for (i = 0; i < io->size; i++) {
2785 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2786 error = EFAULT;
2787 goto fail;
2788 }
2789 switch (ioe.rs_num) {
2790 #ifdef ALTQ
2791 case PF_RULESET_ALTQ:
2792 if (ioe.anchor[0]) {
2793 error = EINVAL;
2794 goto fail;
2795 }
2796 if (!altqs_inactive_open || ioe.ticket !=
2797 ticket_altqs_inactive) {
2798 error = EBUSY;
2799 goto fail;
2800 }
2801 break;
2802 #endif /* ALTQ */
2803 case PF_RULESET_TABLE:
2804 rs = pf_find_ruleset(ioe.anchor);
2805 if (rs == NULL || !rs->topen || ioe.ticket !=
2806 rs->tticket) {
2807 error = EBUSY;
2808 goto fail;
2809 }
2810 break;
2811 default:
2812 if (ioe.rs_num < 0 || ioe.rs_num >=
2813 PF_RULESET_MAX) {
2814 error = EINVAL;
2815 goto fail;
2816 }
2817 rs = pf_find_ruleset(ioe.anchor);
2818 if (rs == NULL ||
2819 !rs->rules[ioe.rs_num].inactive.open ||
2820 rs->rules[ioe.rs_num].inactive.ticket !=
2821 ioe.ticket) {
2822 error = EBUSY;
2823 goto fail;
2824 }
2825 break;
2826 }
2827 }
2828 /* now do the commit - no errors should happen here */
2829 for (i = 0; i < io->size; i++) {
2830 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2831 error = EFAULT;
2832 goto fail;
2833 }
2834 switch (ioe.rs_num) {
2835 #ifdef ALTQ
2836 case PF_RULESET_ALTQ:
2837 if ((error = pf_commit_altq(ioe.ticket)))
2838 goto fail; /* really bad */
2839 break;
2840 #endif /* ALTQ */
2841 case PF_RULESET_TABLE:
2842 bzero(&table, sizeof(table));
2843 strlcpy(table.pfrt_anchor, ioe.anchor,
2844 sizeof(table.pfrt_anchor));
2845 if ((error = pfr_ina_commit(&table, ioe.ticket,
2846 NULL, NULL, 0)))
2847 goto fail; /* really bad */
2848 break;
2849 default:
2850 if ((error = pf_commit_rules(ioe.ticket,
2851 ioe.rs_num, ioe.anchor)))
2852 goto fail; /* really bad */
2853 break;
2854 }
2855 }
2856 break;
2857 }
2858
2859 case DIOCGETSRCNODES: {
2860 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2861 struct pf_src_node *n;
2862 struct pf_src_node *p, pstore;
2863 u_int32_t nr = 0;
2864 int space = psn->psn_len;
2865
2866 if (space == 0) {
2867 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2868 nr++;
2869 psn->psn_len = sizeof(struct pf_src_node) * nr;
2870 break;
2871 }
2872
2873 p = psn->psn_src_nodes;
2874 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2875 int secs = time_second, diff;
2876
2877 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2878 break;
2879
2880 bcopy(n, &pstore, sizeof(pstore));
2881 if (n->rule.ptr != NULL)
2882 pstore.rule.nr = n->rule.ptr->nr;
2883 pstore.creation = secs - pstore.creation;
2884 if (pstore.expire > secs)
2885 pstore.expire -= secs;
2886 else
2887 pstore.expire = 0;
2888
2889 /* adjust the connection rate estimate */
2890 diff = secs - n->conn_rate.last;
2891 if (diff >= n->conn_rate.seconds)
2892 pstore.conn_rate.count = 0;
2893 else
2894 pstore.conn_rate.count -=
2895 n->conn_rate.count * diff /
2896 n->conn_rate.seconds;
2897
2898 error = copyout(&pstore, p, sizeof(*p));
2899 if (error)
2900 goto fail;
2901 p++;
2902 nr++;
2903 }
2904 psn->psn_len = sizeof(struct pf_src_node) * nr;
2905 break;
2906 }
2907
2908 case DIOCCLRSRCNODES: {
2909 struct pf_src_node *n;
2910 struct pf_state *state;
2911
2912 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2913 state->src_node = NULL;
2914 state->nat_src_node = NULL;
2915 }
2916 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2917 n->expire = 1;
2918 n->states = 0;
2919 }
2920 pf_purge_expired_src_nodes();
2921 pf_status.src_nodes = 0;
2922 break;
2923 }
2924
2925 case DIOCSETHOSTID: {
2926 u_int32_t *hostid = (u_int32_t *)addr;
2927
2928 if (*hostid == 0)
2929 pf_status.hostid = arc4random();
2930 else
2931 pf_status.hostid = *hostid;
2932 break;
2933 }
2934
2935 case DIOCOSFPFLUSH:
2936 pf_osfp_flush();
2937 break;
2938
2939 case DIOCIGETIFACES: {
2940 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2941
2942 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2943 error = ENODEV;
2944 break;
2945 }
2946 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2947 &io->pfiio_size, io->pfiio_flags);
2948 break;
2949 }
2950
2951 case DIOCICLRISTATS: {
2952 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2953
2954 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2955 io->pfiio_flags);
2956 break;
2957 }
2958
2959 case DIOCSETIFFLAG: {
2960 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2961
2962 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2963 break;
2964 }
2965
2966 case DIOCCLRIFFLAG: {
2967 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2968
2969 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2970 break;
2971 }
2972
2973 default:
2974 error = ENODEV;
2975 break;
2976 }
2977 fail:
2978 splx(s);
2979 return (error);
2980 }
2981
2982 #ifdef __NetBSD__
2983 #ifdef INET
2984 int
2985 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2986 {
2987 int error;
2988
2989 /*
2990 * ensure that mbufs are writable beforehand
2991 * as it's assumed by pf code.
2992 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2993 * XXX inefficient
2994 */
2995 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2996 if (error) {
2997 m_freem(*mp);
2998 *mp = NULL;
2999 return error;
3000 }
3001
3002 /*
3003 * If the packet is out-bound, we can't delay checksums
3004 * here. For in-bound, the checksum has already been
3005 * validated.
3006 */
3007 if (dir == PFIL_OUT) {
3008 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3009 in_delayed_cksum(*mp);
3010 (*mp)->m_pkthdr.csum_flags &=
3011 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
3012 }
3013 }
3014
3015 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3016 != PF_PASS) {
3017 m_freem(*mp);
3018 *mp = NULL;
3019 return EHOSTUNREACH;
3020 }
3021
3022 /*
3023 * we're not compatible with fast-forward.
3024 */
3025
3026 if (dir == PFIL_IN && *mp) {
3027 (*mp)->m_flags &= ~M_CANFASTFWD;
3028 }
3029
3030 return (0);
3031 }
3032 #endif /* INET */
3033
3034 #ifdef INET6
3035 int
3036 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3037 {
3038 int error;
3039
3040 /*
3041 * ensure that mbufs are writable beforehand
3042 * as it's assumed by pf code.
3043 * XXX inefficient
3044 */
3045 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3046 if (error) {
3047 m_freem(*mp);
3048 *mp = NULL;
3049 return error;
3050 }
3051
3052 /*
3053 * If the packet is out-bound, we can't delay checksums
3054 * here. For in-bound, the checksum has already been
3055 * validated.
3056 */
3057 if (dir == PFIL_OUT) {
3058 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3059 in6_delayed_cksum(*mp);
3060 (*mp)->m_pkthdr.csum_flags &=
3061 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3062 }
3063 }
3064
3065 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3066 != PF_PASS) {
3067 m_freem(*mp);
3068 *mp = NULL;
3069 return EHOSTUNREACH;
3070 } else
3071 return (0);
3072 }
3073 #endif
3074
3075 int
3076 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp,
3077 int dir)
3078 {
3079 u_long cmd = (u_long)mp;
3080
3081 switch (cmd) {
3082 case PFIL_IFNET_ATTACH:
3083 pfi_attach_ifnet(ifp);
3084 break;
3085 case PFIL_IFNET_DETACH:
3086 pfi_detach_ifnet(ifp);
3087 break;
3088 }
3089
3090 return (0);
3091 }
3092
3093 int
3094 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp,
3095 int dir)
3096 {
3097 extern void pfi_kifaddr_update_if(struct ifnet *);
3098
3099 u_long cmd = (u_long)mp;
3100
3101 switch (cmd) {
3102 case SIOCSIFADDR:
3103 case SIOCAIFADDR:
3104 case SIOCDIFADDR:
3105 #ifdef INET6
3106 case SIOCAIFADDR_IN6:
3107 case SIOCDIFADDR_IN6:
3108 #endif
3109 pfi_kifaddr_update_if(ifp);
3110 break;
3111 default:
3112 panic("unexpected ioctl");
3113 }
3114
3115 return (0);
3116 }
3117
/*
 * Attach pf to the pfil(9) packet filter framework.
 *
 * Installs the interface attach/detach hook, the interface-address
 * change hook, and the IPv4 (and, when INET6 is configured, IPv6)
 * packet hooks, then registers every interface that already exists.
 * Returns 0 on success or an errno; on failure the goto cascade at
 * the bottom unwinds the hooks installed so far, in reverse order.
 */
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	/* Attaching twice is a harmless no-op. */
	if (pf_pfil_attached)
		return (0);

	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	/* IPv4 packet hook; ENOENT if the AF_INET pfil head is missing. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	/* IPv6 packet hook, same pattern as the IPv4 hook above. */
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	/* Tell pf about interfaces that attached before we hooked in. */
	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

	/* Error unwind: each label removes the hooks added before it. */
#ifdef INET6
bad4:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}
3176
/*
 * Detach pf from the pfil(9) framework: unregister every interface
 * known to pf and remove each hook installed by pf_pfil_attach().
 * Always returns 0; detaching while not attached is a no-op.
 */
static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	if (pf_pfil_attached == 0)
		return (0);

	/*
	 * Detach every interface pf is tracking.  NOTE(review): the
	 * loop tests pfi_index2kif[i] but passes ifindex2ifnet[i];
	 * this assumes the two tables stay in sync -- confirm.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	/* Packet hooks are removed only if their pfil head still exists. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
3210 #endif
3211