1 /* $NetBSD: pf_ioctl.c,v 1.12 2004/11/14 11:12:16 yamt Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.130 2004/09/09 22:08:42 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define NPFSYNC 0
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99
100 void pfattach(int);
101 #ifdef _LKM
102 void pfdetach(void);
103 #endif
104 int pfopen(dev_t, int, int, struct proc *);
105 int pfclose(dev_t, int, int, struct proc *);
106 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
107 u_int8_t, u_int8_t, u_int8_t);
108 int pf_get_ruleset_number(u_int8_t);
109 void pf_init_ruleset(struct pf_ruleset *);
110 int pf_anchor_setup(struct pf_rule *,
111 const struct pf_ruleset *, const char *);
112 int pf_anchor_copyout(const struct pf_ruleset *,
113 const struct pf_rule *, struct pfioc_rule *);
114 void pf_anchor_remove(struct pf_rule *);
115
116 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
117 void pf_empty_pool(struct pf_palist *);
118 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
119 #ifdef ALTQ
120 int pf_begin_altq(u_int32_t *);
121 int pf_rollback_altq(u_int32_t);
122 int pf_commit_altq(u_int32_t);
123 int pf_enable_altq(struct pf_altq *);
124 int pf_disable_altq(struct pf_altq *);
125 #endif /* ALTQ */
126 int pf_begin_rules(u_int32_t *, int, const char *);
127 int pf_rollback_rules(u_int32_t, int, char *);
128 int pf_commit_rules(u_int32_t, int, char *);
129
130 #ifdef __NetBSD__
131 const struct cdevsw pf_cdevsw = {
132 pfopen, pfclose, noread, nowrite, pfioctl,
133 nostop, notty, nopoll, nommap, nokqfilter,
134 };
135
136 static int pf_pfil_attach(void);
137 static int pf_pfil_detach(void);
138
139 static int pf_pfil_attached = 0;
140 #endif
141
142 #ifdef __OpenBSD__
143 extern struct timeout pf_expire_to;
144 #else
145 extern struct callout pf_expire_to;
146 #endif
147
148 struct pf_rule pf_default_rule;
149 #ifdef ALTQ
150 static int pf_altq_running;
151 #endif
152
153 #define TAGID_MAX 50000
154 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
155 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
156
157 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
158 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
159 #endif
160 static u_int16_t tagname2tag(struct pf_tags *, char *);
161 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
162 static void tag_unref(struct pf_tags *, u_int16_t);
163
164 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
165
166 #ifdef __NetBSD__
167 extern struct pfil_head if_pfil;
168 #endif
169
170 void
171 pfattach(int num)
172 {
173 u_int32_t *timeout = pf_default_rule.timeout;
174
175 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
176 &pool_allocator_nointr);
177 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
178 "pfsrctrpl", NULL);
179 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
180 NULL);
181 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
182 &pool_allocator_nointr);
183 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
184 "pfpooladdrpl", &pool_allocator_nointr);
185 pfr_initialize();
186 pfi_initialize();
187 pf_osfp_initialize();
188
189 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
190 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
191
192 RB_INIT(&tree_src_tracking);
193 RB_INIT(&pf_anchors);
194 pf_init_ruleset(&pf_main_ruleset);
195 TAILQ_INIT(&pf_altqs[0]);
196 TAILQ_INIT(&pf_altqs[1]);
197 TAILQ_INIT(&pf_pabuf);
198 pf_altqs_active = &pf_altqs[0];
199 pf_altqs_inactive = &pf_altqs[1];
200 TAILQ_INIT(&state_updates);
201
202 /* default rule should never be garbage collected */
203 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
204 pf_default_rule.action = PF_PASS;
205 pf_default_rule.nr = -1;
206
207 /* initialize default timeouts */
208 timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */
209 timeout[PFTM_TCP_OPENING] = 30; /* No response yet */
210 timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */
211 timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */
212 timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */
213 timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */
214 timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */
215 timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */
216 timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */
217 timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */
218 timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */
219 timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */
220 timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */
221 timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */
222 timeout[PFTM_FRAG] = 30; /* Fragment expire */
223 timeout[PFTM_INTERVAL] = 10; /* Expire interval */
224 timeout[PFTM_SRC_NODE] = 0; /* Source tracking */
225 timeout[PFTM_TS_DIFF] = 30; /* Allowed TS diff */
226
227 #ifdef __OpenBSD__
228 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
229 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
230 #else
231 callout_init(&pf_expire_to);
232 callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
233 pf_purge_timeout, &pf_expire_to);
234 #endif
235
236 pf_normalize_init();
237 bzero(&pf_status, sizeof(pf_status));
238 pf_status.debug = PF_DEBUG_URGENT;
239
240 /* XXX do our best to avoid a conflict */
241 pf_status.hostid = arc4random();
242 }
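/*
 * Note that pfattach() arms the expiry timer only once; the handler
 * (pf_purge_timeout(), defined in pf.c) is expected to purge expired
 * states, fragments and source nodes and then re-arm itself every
 * timeout[PFTM_INTERVAL] seconds, so a changed PFTM_INTERVAL takes
 * effect on the next tick.
 */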
243
244 #ifdef _LKM
245 #define TAILQ_DRAIN(list, element) \
246 do { \
247 while ((element = TAILQ_FIRST(list)) != NULL) { \
248 TAILQ_REMOVE(list, element, entries); \
249 free(element, M_TEMP); \
250 } \
251 } while (0)
252
253 void
254 pfdetach(void)
255 {
256 struct pf_pooladdr *pooladdr_e;
257 struct pf_altq *altq_e;
258 struct pf_anchor *anchor_e;
259
260 (void)pf_pfil_detach();
261
262 callout_stop(&pf_expire_to);
263 pf_normalize_destroy();
264 pf_osfp_destroy();
265 pfi_destroy();
266
267 TAILQ_DRAIN(&pf_pabuf, pooladdr_e);
268 TAILQ_DRAIN(&pf_altqs[1], altq_e);
269 TAILQ_DRAIN(&pf_altqs[0], altq_e);
270 while ((anchor_e = RB_ROOT(&pf_anchors)) != NULL) {
271 RB_REMOVE(pf_anchor_global, &pf_anchors, anchor_e);
272 free(anchor_e, M_TEMP);
273 }
274 /* pf_remove_if_empty_ruleset(&pf_main_ruleset); */
275 pfr_destroy();
276 pool_destroy(&pf_pooladdr_pl);
277 pool_destroy(&pf_altq_pl);
278 pool_destroy(&pf_state_pl);
279 pool_destroy(&pf_rule_pl);
280 pool_destroy(&pf_src_tree_pl);
281 }
282 #endif
283
284 int
285 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
286 {
287 if (minor(dev) >= 1)
288 return (ENXIO);
289 return (0);
290 }
291
292 int
293 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
294 {
295 if (minor(dev) >= 1)
296 return (ENXIO);
297 return (0);
298 }
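/*
 * The control interface is the /dev/pf character device backed by
 * pfopen()/pfclose()/pfioctl() above.  A minimal userland sketch
 * (illustrative only, assuming <fcntl.h>, <sys/ioctl.h>, <net/pfvar.h>
 * and <err.h>; pfctl is the real consumer) enables the filter like
 * this -- read-only ioctls also work on an O_RDONLY descriptor, but
 * anything lacking FWRITE is rejected further down in pfioctl():
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *
 *	if (dev == -1)
 *		err(1, "open(/dev/pf)");
 *	if (ioctl(dev, DIOCSTART) == -1)
 *		err(1, "DIOCSTART");
 */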
299
300 struct pf_pool *
301 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
302 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
303 u_int8_t check_ticket)
304 {
305 struct pf_ruleset *ruleset;
306 struct pf_rule *rule;
307 int rs_num;
308
309 ruleset = pf_find_ruleset(anchor);
310 if (ruleset == NULL)
311 return (NULL);
312 rs_num = pf_get_ruleset_number(rule_action);
313 if (rs_num >= PF_RULESET_MAX)
314 return (NULL);
315 if (active) {
316 if (check_ticket && ticket !=
317 ruleset->rules[rs_num].active.ticket)
318 return (NULL);
319 if (r_last)
320 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
321 pf_rulequeue);
322 else
323 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
324 } else {
325 if (check_ticket && ticket !=
326 ruleset->rules[rs_num].inactive.ticket)
327 return (NULL);
328 if (r_last)
329 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
330 pf_rulequeue);
331 else
332 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
333 }
334 if (!r_last) {
335 while ((rule != NULL) && (rule->nr != rule_number))
336 rule = TAILQ_NEXT(rule, entries);
337 }
338 if (rule == NULL)
339 return (NULL);
340
341 return (&rule->rpool);
342 }
343
344 int
345 pf_get_ruleset_number(u_int8_t action)
346 {
347 switch (action) {
348 case PF_SCRUB:
349 return (PF_RULESET_SCRUB);
350 break;
351 case PF_PASS:
352 case PF_DROP:
353 return (PF_RULESET_FILTER);
354 break;
355 case PF_NAT:
356 case PF_NONAT:
357 return (PF_RULESET_NAT);
358 break;
359 case PF_BINAT:
360 case PF_NOBINAT:
361 return (PF_RULESET_BINAT);
362 break;
363 case PF_RDR:
364 case PF_NORDR:
365 return (PF_RULESET_RDR);
366 break;
367 default:
368 return (PF_RULESET_MAX);
369 break;
370 }
371 }
372
373 void
374 pf_init_ruleset(struct pf_ruleset *ruleset)
375 {
376 int i;
377
378 memset(ruleset, 0, sizeof(struct pf_ruleset));
379 for (i = 0; i < PF_RULESET_MAX; i++) {
380 TAILQ_INIT(&ruleset->rules[i].queues[0]);
381 TAILQ_INIT(&ruleset->rules[i].queues[1]);
382 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
383 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
384 }
385 }
386
387 struct pf_anchor *
388 pf_find_anchor(const char *path)
389 {
390 static struct pf_anchor key;
391
392 memset(&key, 0, sizeof(key));
393 strlcpy(key.path, path, sizeof(key.path));
394 return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
395 }
396
397 struct pf_ruleset *
398 pf_find_ruleset(const char *path)
399 {
400 struct pf_anchor *anchor;
401
402 while (*path == '/')
403 path++;
404 if (!*path)
405 return (&pf_main_ruleset);
406 anchor = pf_find_anchor(path);
407 if (anchor == NULL)
408 return (NULL);
409 else
410 return (&anchor->ruleset);
411 }
412
413 struct pf_ruleset *
414 pf_find_or_create_ruleset(const char *path)
415 {
416 static char p[MAXPATHLEN];
417 char *q, *r;
418 struct pf_ruleset *ruleset;
419 struct pf_anchor *anchor = NULL /* XXX gcc */,
420 *dup, *parent = NULL;
421
422 while (*path == '/')
423 path++;
424 ruleset = pf_find_ruleset(path);
425 if (ruleset != NULL)
426 return (ruleset);
427 strlcpy(p, path, sizeof(p));
428 while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
429 *q = 0;
430 if ((ruleset = pf_find_ruleset(p)) != NULL) {
431 parent = ruleset->anchor;
432 break;
433 }
434 }
435 if (q == NULL)
436 q = p;
437 else
438 q++;
439 strlcpy(p, path, sizeof(p));
440 if (!*q)
441 return (NULL);
442 while ((r = strchr(q, '/')) != NULL || *q) {
443 if (r != NULL)
444 *r = 0;
445 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
446 (parent != NULL && strlen(parent->path) >=
447 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
448 return (NULL);
449 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
450 M_NOWAIT);
451 if (anchor == NULL)
452 return (NULL);
453 memset(anchor, 0, sizeof(*anchor));
454 RB_INIT(&anchor->children);
455 strlcpy(anchor->name, q, sizeof(anchor->name));
456 if (parent != NULL) {
457 strlcpy(anchor->path, parent->path,
458 sizeof(anchor->path));
459 strlcat(anchor->path, "/", sizeof(anchor->path));
460 }
461 strlcat(anchor->path, anchor->name, sizeof(anchor->path));
462 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
463 NULL) {
464 printf("pf_find_or_create_ruleset: RB_INSERT1 "
465 "'%s' '%s' collides with '%s' '%s'\n",
466 anchor->path, anchor->name, dup->path, dup->name);
467 free(anchor, M_TEMP);
468 return (NULL);
469 }
470 if (parent != NULL) {
471 anchor->parent = parent;
472 if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
473 anchor)) != NULL) {
474 printf("pf_find_or_create_ruleset: "
475 "RB_INSERT2 '%s' '%s' collides with "
476 "'%s' '%s'\n", anchor->path, anchor->name,
477 dup->path, dup->name);
478 RB_REMOVE(pf_anchor_global, &pf_anchors,
479 anchor);
480 free(anchor, M_TEMP);
481 return (NULL);
482 }
483 }
484 pf_init_ruleset(&anchor->ruleset);
485 anchor->ruleset.anchor = anchor;
486 parent = anchor;
487 if (r != NULL)
488 q = r + 1;
489 else
490 *q = 0;
491 }
492 return (&anchor->ruleset);
493 }
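/*
 * Example (illustrative): asked for "foo/bar" when neither anchor
 * exists yet, the loop above first creates the anchor "foo" (path
 * "foo"), then "bar" as its child (path "foo/bar"), and returns the
 * ruleset of that innermost anchor.  Each path component is limited
 * to PF_ANCHOR_NAME_SIZE - 1 characters and the accumulated path to
 * roughly MAXPATHLEN.
 */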
494
495 void
496 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
497 {
498 struct pf_anchor *parent;
499 int i;
500
501 while (ruleset != NULL) {
502 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
503 !RB_EMPTY(&ruleset->anchor->children) ||
504 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
505 ruleset->topen)
506 return;
507 for (i = 0; i < PF_RULESET_MAX; ++i)
508 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
509 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
510 ruleset->rules[i].inactive.open)
511 return;
512 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
513 if ((parent = ruleset->anchor->parent) != NULL)
514 RB_REMOVE(pf_anchor_node, &parent->children,
515 ruleset->anchor);
516 free(ruleset->anchor, M_TEMP);
517 if (parent == NULL)
518 return;
519 ruleset = &parent->ruleset;
520 }
521 }
522
523 int
524 pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
525 const char *name)
526 {
527 static char *p, path[MAXPATHLEN];
528 struct pf_ruleset *ruleset;
529
530 r->anchor = NULL;
531 r->anchor_relative = 0;
532 r->anchor_wildcard = 0;
533 if (!name[0])
534 return (0);
535 if (name[0] == '/')
536 strlcpy(path, name + 1, sizeof(path));
537 else {
538 /* relative path */
539 r->anchor_relative = 1;
540 if (s->anchor == NULL || !s->anchor->path[0])
541 path[0] = 0;
542 else
543 strlcpy(path, s->anchor->path, sizeof(path));
544 while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
545 if (!path[0]) {
546 printf("pf_anchor_setup: .. beyond root\n");
547 return (1);
548 }
549 if ((p = strrchr(path, '/')) != NULL)
550 *p = 0;
551 else
552 path[0] = 0;
553 r->anchor_relative++;
554 name += 3;
555 }
556 if (path[0])
557 strlcat(path, "/", sizeof(path));
558 strlcat(path, name, sizeof(path));
559 }
560 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
561 r->anchor_wildcard = 1;
562 *p = 0;
563 }
564 ruleset = pf_find_or_create_ruleset(path);
565 if (ruleset == NULL || ruleset->anchor == NULL) {
566 printf("pf_anchor_setup: ruleset\n");
567 return (1);
568 }
569 r->anchor = ruleset->anchor;
570 r->anchor->refcnt++;
571 return (0);
572 }
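/*
 * Example (illustrative): for a rule sitting in the anchor "foo/bar"
 * whose call target is "../baz", the relative branch above starts
 * from "foo/bar", strips one trailing component per leading "../"
 * (leaving "foo"), appends the remainder to get "foo/baz", and
 * records anchor_relative = 2 so pf_anchor_copyout() can reconstruct
 * the original notation.  A trailing "*" component is not stored in
 * the path; it merely sets anchor_wildcard so evaluation descends
 * into all children of the anchor.
 */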
573
574 int
575 pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
576 struct pfioc_rule *pr)
577 {
578 pr->anchor_call[0] = 0;
579 if (r->anchor == NULL)
580 return (0);
581 if (!r->anchor_relative) {
582 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
583 strlcat(pr->anchor_call, r->anchor->path,
584 sizeof(pr->anchor_call));
585 } else {
586 char a[MAXPATHLEN], b[MAXPATHLEN], *p;
587 int i;
588
589 if (rs->anchor == NULL)
590 a[0] = 0;
591 else
592 strlcpy(a, rs->anchor->path, sizeof(a));
593 strlcpy(b, r->anchor->path, sizeof(b));
594 for (i = 1; i < r->anchor_relative; ++i) {
595 if ((p = strrchr(a, '/')) == NULL)
596 p = a;
597 *p = 0;
598 strlcat(pr->anchor_call, "../",
599 sizeof(pr->anchor_call));
600 }
601 if (strncmp(a, b, strlen(a))) {
602 printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
603 return (1);
604 }
605 if (strlen(b) > strlen(a))
606 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
607 sizeof(pr->anchor_call));
608 }
609 if (r->anchor_wildcard)
610 strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
611 sizeof(pr->anchor_call));
612 return (0);
613 }
614
615 void
616 pf_anchor_remove(struct pf_rule *r)
617 {
618 if (r->anchor == NULL)
619 return;
620 if (r->anchor->refcnt <= 0) {
621 printf("pf_anchor_remove: broken refcount");
622 r->anchor = NULL;
623 return;
624 }
625 if (!--r->anchor->refcnt)
626 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
627 r->anchor = NULL;
628 }
629
630 void
631 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
632 {
633 struct pf_pooladdr *mv_pool_pa;
634
635 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
636 TAILQ_REMOVE(poola, mv_pool_pa, entries);
637 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
638 }
639 }
640
641 void
642 pf_empty_pool(struct pf_palist *poola)
643 {
644 struct pf_pooladdr *empty_pool_pa;
645
646 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
647 pfi_dynaddr_remove(&empty_pool_pa->addr);
648 pf_tbladdr_remove(&empty_pool_pa->addr);
649 pfi_detach_rule(empty_pool_pa->kif);
650 TAILQ_REMOVE(poola, empty_pool_pa, entries);
651 pool_put(&pf_pooladdr_pl, empty_pool_pa);
652 }
653 }
654
655 void
656 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
657 {
658 if (rulequeue != NULL) {
659 if (rule->states <= 0) {
660 /*
661 * XXX - we need to remove the table *before* detaching
662 * the rule to make sure the table code does not delete
663 * the anchor under our feet.
664 */
665 pf_tbladdr_remove(&rule->src.addr);
666 pf_tbladdr_remove(&rule->dst.addr);
667 }
668 TAILQ_REMOVE(rulequeue, rule, entries);
669 rule->entries.tqe_prev = NULL;
670 rule->nr = -1;
671 }
672
673 if (rule->states > 0 || rule->src_nodes > 0 ||
674 rule->entries.tqe_prev != NULL)
675 return;
676 pf_tag_unref(rule->tag);
677 pf_tag_unref(rule->match_tag);
678 #ifdef ALTQ
679 if (rule->pqid != rule->qid)
680 pf_qid_unref(rule->pqid);
681 pf_qid_unref(rule->qid);
682 #endif
683 pfi_dynaddr_remove(&rule->src.addr);
684 pfi_dynaddr_remove(&rule->dst.addr);
685 if (rulequeue == NULL) {
686 pf_tbladdr_remove(&rule->src.addr);
687 pf_tbladdr_remove(&rule->dst.addr);
688 }
689 pfi_detach_rule(rule->kif);
690 pf_anchor_remove(rule);
691 pf_empty_pool(&rule->rpool.list);
692 pool_put(&pf_rule_pl, rule);
693 }
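/*
 * pf_rm_rule() has deferred-free semantics: while the rule is still
 * referenced by states or source nodes it is only unlinked from its
 * queue (tqe_prev = NULL marks it as such) and the memory is kept;
 * the code that later drops the last reference is expected to call
 * pf_rm_rule(NULL, rule) again, at which point the tags, queue ids,
 * dynamic addresses, interface binding and address pool are released
 * and the rule returns to pf_rule_pl.
 */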
694
695 static u_int16_t
696 tagname2tag(struct pf_tags *head, char *tagname)
697 {
698 struct pf_tagname *tag, *p = NULL;
699 u_int16_t new_tagid = 1;
700
701 TAILQ_FOREACH(tag, head, entries)
702 if (strcmp(tagname, tag->name) == 0) {
703 tag->ref++;
704 return (tag->tag);
705 }
706
707 /*
708 * to avoid fragmentation, we do a linear search from the beginning
709 * and take the first free slot we find. if there is none or the list
710 * is empty, append a new entry at the end.
711 */
712
713 /* new entry */
714 if (!TAILQ_EMPTY(head))
715 for (p = TAILQ_FIRST(head); p != NULL &&
716 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
717 new_tagid = p->tag + 1;
718
719 if (new_tagid > TAGID_MAX)
720 return (0);
721
722 /* allocate and fill new struct pf_tagname */
723 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
724 M_TEMP, M_NOWAIT);
725 if (tag == NULL)
726 return (0);
727 bzero(tag, sizeof(struct pf_tagname));
728 strlcpy(tag->name, tagname, sizeof(tag->name));
729 tag->tag = new_tagid;
730 tag->ref++;
731
732 if (p != NULL) /* insert new entry before p */
733 TAILQ_INSERT_BEFORE(p, tag, entries);
734 else /* either list empty or no free slot in between */
735 TAILQ_INSERT_TAIL(head, tag, entries);
736
737 return (tag->tag);
738 }
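/*
 * Example (illustrative): with tags 1 ("foo"), 2 ("bar") and 4
 * ("baz") in the list, a request for a new name walks 1 and 2,
 * stops at 4 and hands out id 3, inserting the new entry before the
 * one for 4.  Ids only become reusable after tag_unref() drops their
 * reference count to zero, and ids above TAGID_MAX are never handed
 * out.
 */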
739
740 static void
741 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
742 {
743 struct pf_tagname *tag;
744
745 TAILQ_FOREACH(tag, head, entries)
746 if (tag->tag == tagid) {
747 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
748 return;
749 }
750 }
751
752 static void
753 tag_unref(struct pf_tags *head, u_int16_t tag)
754 {
755 struct pf_tagname *p, *next;
756
757 if (tag == 0)
758 return;
759
760 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
761 next = TAILQ_NEXT(p, entries);
762 if (tag == p->tag) {
763 if (--p->ref == 0) {
764 TAILQ_REMOVE(head, p, entries);
765 free(p, M_TEMP);
766 }
767 break;
768 }
769 }
770 }
771
772 u_int16_t
773 pf_tagname2tag(char *tagname)
774 {
775 return (tagname2tag(&pf_tags, tagname));
776 }
777
778 void
779 pf_tag2tagname(u_int16_t tagid, char *p)
780 {
781 return (tag2tagname(&pf_tags, tagid, p));
782 }
783
784 void
785 pf_tag_unref(u_int16_t tag)
786 {
787 return (tag_unref(&pf_tags, tag));
788 }
789
790 #ifdef ALTQ
791 u_int32_t
792 pf_qname2qid(char *qname)
793 {
794 return ((u_int32_t)tagname2tag(&pf_qids, qname));
795 }
796
797 void
798 pf_qid2qname(u_int32_t qid, char *p)
799 {
800 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
801 }
802
803 void
804 pf_qid_unref(u_int32_t qid)
805 {
806 return (tag_unref(&pf_qids, (u_int16_t)qid));
807 }
808
809 int
810 pf_begin_altq(u_int32_t *ticket)
811 {
812 struct pf_altq *altq;
813 int error = 0;
814
815 /* Purge the old altq list */
816 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
817 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
818 if (altq->qname[0] == 0) {
819 /* detach and destroy the discipline */
820 error = altq_remove(altq);
821 } else
822 pf_qid_unref(altq->qid);
823 pool_put(&pf_altq_pl, altq);
824 }
825 if (error)
826 return (error);
827 *ticket = ++ticket_altqs_inactive;
828 altqs_inactive_open = 1;
829 return (0);
830 }
831
832 int
833 pf_rollback_altq(u_int32_t ticket)
834 {
835 struct pf_altq *altq;
836 int error = 0;
837
838 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
839 return (0);
840 /* Purge the old altq list */
841 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
842 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
843 if (altq->qname[0] == 0) {
844 /* detach and destroy the discipline */
845 error = altq_remove(altq);
846 } else
847 pf_qid_unref(altq->qid);
848 pool_put(&pf_altq_pl, altq);
849 }
850 altqs_inactive_open = 0;
851 return (error);
852 }
853
854 int
855 pf_commit_altq(u_int32_t ticket)
856 {
857 struct pf_altqqueue *old_altqs;
858 struct pf_altq *altq;
859 int s, err, error = 0;
860
861 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
862 return (EBUSY);
863
864 /* swap altqs, keep the old. */
865 s = splsoftnet();
866 old_altqs = pf_altqs_active;
867 pf_altqs_active = pf_altqs_inactive;
868 pf_altqs_inactive = old_altqs;
869 ticket_altqs_active = ticket_altqs_inactive;
870
871 /* Attach new disciplines */
872 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
873 if (altq->qname[0] == 0) {
874 /* attach the discipline */
875 error = altq_pfattach(altq);
876 if (error == 0 && pf_altq_running)
877 error = pf_enable_altq(altq);
878 if (error != 0) {
879 splx(s);
880 return (error);
881 }
882 }
883 }
884
885 /* Purge the old altq list */
886 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
887 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
888 if (altq->qname[0] == 0) {
889 /* detach and destroy the discipline */
890 if (pf_altq_running)
891 error = pf_disable_altq(altq);
892 err = altq_pfdetach(altq);
893 if (err != 0 && error == 0)
894 error = err;
895 err = altq_remove(altq);
896 if (err != 0 && error == 0)
897 error = err;
898 } else
899 pf_qid_unref(altq->qid);
900 pool_put(&pf_altq_pl, altq);
901 }
902 splx(s);
903
904 altqs_inactive_open = 0;
905 return (error);
906 }
907
908 int
909 pf_enable_altq(struct pf_altq *altq)
910 {
911 struct ifnet *ifp;
912 struct tb_profile tb;
913 int s, error = 0;
914
915 if ((ifp = ifunit(altq->ifname)) == NULL)
916 return (EINVAL);
917
918 if (ifp->if_snd.altq_type != ALTQT_NONE)
919 error = altq_enable(&ifp->if_snd);
920
921 /* set tokenbucket regulator */
922 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
923 tb.rate = altq->ifbandwidth;
924 tb.depth = altq->tbrsize;
925 s = splimp();
926 error = tbr_set(&ifp->if_snd, &tb);
927 splx(s);
928 }
929
930 return (error);
931 }
932
933 int
934 pf_disable_altq(struct pf_altq *altq)
935 {
936 struct ifnet *ifp;
937 struct tb_profile tb;
938 int s, error;
939
940 if ((ifp = ifunit(altq->ifname)) == NULL)
941 return (EINVAL);
942
943 /*
944 * if the discipline is no longer referenced, it has been
945 * overridden by a new one; in that case just return.
946 */
947 if (altq->altq_disc != ifp->if_snd.altq_disc)
948 return (0);
949
950 error = altq_disable(&ifp->if_snd);
951
952 if (error == 0) {
953 /* clear tokenbucket regulator */
954 tb.rate = 0;
955 s = splimp();
956 error = tbr_set(&ifp->if_snd, &tb);
957 splx(s);
958 }
959
960 return (error);
961 }
962 #endif /* ALTQ */
963
964 int
965 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
966 {
967 struct pf_ruleset *rs;
968 struct pf_rule *rule;
969
970 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
971 return (EINVAL);
972 rs = pf_find_or_create_ruleset(anchor);
973 if (rs == NULL)
974 return (EINVAL);
975 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
976 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
977 *ticket = ++rs->rules[rs_num].inactive.ticket;
978 rs->rules[rs_num].inactive.open = 1;
979 return (0);
980 }
981
982 int
983 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
984 {
985 struct pf_ruleset *rs;
986 struct pf_rule *rule;
987
988 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
989 return (EINVAL);
990 rs = pf_find_ruleset(anchor);
991 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
992 rs->rules[rs_num].inactive.ticket != ticket)
993 return (0);
994 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
995 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
996 rs->rules[rs_num].inactive.open = 0;
997 return (0);
998 }
999
1000 int
1001 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1002 {
1003 struct pf_ruleset *rs;
1004 struct pf_rule *rule;
1005 struct pf_rulequeue *old_rules;
1006 int s;
1007
1008 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1009 return (EINVAL);
1010 rs = pf_find_ruleset(anchor);
1011 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1012 ticket != rs->rules[rs_num].inactive.ticket)
1013 return (EBUSY);
1014
1015 /* Swap rules, keep the old. */
1016 s = splsoftnet();
1017 old_rules = rs->rules[rs_num].active.ptr;
1018 rs->rules[rs_num].active.ptr =
1019 rs->rules[rs_num].inactive.ptr;
1020 rs->rules[rs_num].inactive.ptr = old_rules;
1021 rs->rules[rs_num].active.ticket =
1022 rs->rules[rs_num].inactive.ticket;
1023 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1024
1025 /* Purge the old rule list. */
1026 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1027 pf_rm_rule(old_rules, rule);
1028 rs->rules[rs_num].inactive.open = 0;
1029 pf_remove_if_empty_ruleset(rs);
1030 splx(s);
1031 return (0);
1032 }
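/*
 * pf_begin_rules()/pf_rollback_rules()/pf_commit_rules() implement a
 * ticket-guarded two-phase ruleset load.  Callers (in this
 * generation of pf, the transaction ioctls DIOCXBEGIN, DIOCXROLLBACK
 * and DIOCXCOMMIT) obtain a ticket for the inactive ruleset, stage
 * rules against that ticket via DIOCADDRULE, and finally swap the
 * inactive and active queues at splsoftnet() so the switch is atomic
 * with respect to packet processing; a stale ticket makes the commit
 * fail with EBUSY rather than clobber a concurrent loader.
 */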
1033
1034 int
1035 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1036 {
1037 struct pf_pooladdr *pa = NULL;
1038 struct pf_pool *pool = NULL;
1039 int s;
1040 int error = 0;
1041
1042 /* XXX keep in sync with switch() below */
1043 if (securelevel > 1)
1044 switch (cmd) {
1045 case DIOCGETRULES:
1046 case DIOCGETRULE:
1047 case DIOCGETADDRS:
1048 case DIOCGETADDR:
1049 case DIOCGETSTATE:
1050 case DIOCSETSTATUSIF:
1051 case DIOCGETSTATUS:
1052 case DIOCCLRSTATUS:
1053 case DIOCNATLOOK:
1054 case DIOCSETDEBUG:
1055 case DIOCGETSTATES:
1056 case DIOCGETTIMEOUT:
1057 case DIOCCLRRULECTRS:
1058 case DIOCGETLIMIT:
1059 case DIOCGETALTQS:
1060 case DIOCGETALTQ:
1061 case DIOCGETQSTATS:
1062 case DIOCGETRULESETS:
1063 case DIOCGETRULESET:
1064 case DIOCRGETTABLES:
1065 case DIOCRGETTSTATS:
1066 case DIOCRCLRTSTATS:
1067 case DIOCRCLRADDRS:
1068 case DIOCRADDADDRS:
1069 case DIOCRDELADDRS:
1070 case DIOCRSETADDRS:
1071 case DIOCRGETADDRS:
1072 case DIOCRGETASTATS:
1073 case DIOCRCLRASTATS:
1074 case DIOCRTSTADDRS:
1075 case DIOCOSFPGET:
1076 case DIOCGETSRCNODES:
1077 case DIOCCLRSRCNODES:
1078 case DIOCIGETIFACES:
1079 case DIOCICLRISTATS:
1080 break;
1081 case DIOCRCLRTABLES:
1082 case DIOCRADDTABLES:
1083 case DIOCRDELTABLES:
1084 case DIOCRSETTFLAGS:
1085 if (((struct pfioc_table *)addr)->pfrio_flags &
1086 PFR_FLAG_DUMMY)
1087 break; /* dummy operation ok */
1088 return (EPERM);
1089 default:
1090 return (EPERM);
1091 }
1092
1093 if (!(flags & FWRITE))
1094 switch (cmd) {
1095 case DIOCGETRULES:
1096 case DIOCGETRULE:
1097 case DIOCGETADDRS:
1098 case DIOCGETADDR:
1099 case DIOCGETSTATE:
1100 case DIOCGETSTATUS:
1101 case DIOCGETSTATES:
1102 case DIOCGETTIMEOUT:
1103 case DIOCGETLIMIT:
1104 case DIOCGETALTQS:
1105 case DIOCGETALTQ:
1106 case DIOCGETQSTATS:
1107 case DIOCGETRULESETS:
1108 case DIOCGETRULESET:
1109 case DIOCRGETTABLES:
1110 case DIOCRGETTSTATS:
1111 case DIOCRGETADDRS:
1112 case DIOCRGETASTATS:
1113 case DIOCRTSTADDRS:
1114 case DIOCOSFPGET:
1115 case DIOCGETSRCNODES:
1116 case DIOCIGETIFACES:
1117 break;
1118 case DIOCRCLRTABLES:
1119 case DIOCRADDTABLES:
1120 case DIOCRDELTABLES:
1121 case DIOCRCLRTSTATS:
1122 case DIOCRCLRADDRS:
1123 case DIOCRADDADDRS:
1124 case DIOCRDELADDRS:
1125 case DIOCRSETADDRS:
1126 case DIOCRSETTFLAGS:
1127 if (((struct pfioc_table *)addr)->pfrio_flags &
1128 PFR_FLAG_DUMMY)
1129 break; /* dummy operation ok */
1130 return (EACCES);
1131 default:
1132 return (EACCES);
1133 }
1134
1135 switch (cmd) {
1136
1137 case DIOCSTART:
1138 if (pf_status.running)
1139 error = EEXIST;
1140 else {
1141 #ifdef __NetBSD__
1142 error = pf_pfil_attach();
1143 if (error)
1144 break;
1145 #endif
1146 pf_status.running = 1;
1147 pf_status.since = time_second;
1148 if (pf_status.stateid == 0) {
1149 pf_status.stateid = time_second;
1150 pf_status.stateid = pf_status.stateid << 32;
1151 }
1152 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1153 }
1154 break;
1155
1156 case DIOCSTOP:
1157 if (!pf_status.running)
1158 error = ENOENT;
1159 else {
1160 #ifdef __NetBSD__
1161 error = pf_pfil_detach();
1162 if (error)
1163 break;
1164 #endif
1165 pf_status.running = 0;
1166 pf_status.since = time_second;
1167 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1168 }
1169 break;
1170
1171 case DIOCADDRULE: {
1172 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1173 struct pf_ruleset *ruleset;
1174 struct pf_rule *rule, *tail;
1175 struct pf_pooladdr *pa;
1176 int rs_num;
1177
1178 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1179 ruleset = pf_find_ruleset(pr->anchor);
1180 if (ruleset == NULL) {
1181 error = EINVAL;
1182 break;
1183 }
1184 rs_num = pf_get_ruleset_number(pr->rule.action);
1185 if (rs_num >= PF_RULESET_MAX) {
1186 error = EINVAL;
1187 break;
1188 }
1189 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1190 error = EINVAL;
1191 break;
1192 }
1193 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1194 error = EBUSY;
1195 break;
1196 }
1197 if (pr->pool_ticket != ticket_pabuf) {
1198 error = EBUSY;
1199 break;
1200 }
1201 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1202 if (rule == NULL) {
1203 error = ENOMEM;
1204 break;
1205 }
1206 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1207 rule->anchor = NULL;
1208 rule->kif = NULL;
1209 TAILQ_INIT(&rule->rpool.list);
1210 /* initialize refcounting */
1211 rule->states = 0;
1212 rule->src_nodes = 0;
1213 rule->entries.tqe_prev = NULL;
1214 #ifndef INET
1215 if (rule->af == AF_INET) {
1216 pool_put(&pf_rule_pl, rule);
1217 error = EAFNOSUPPORT;
1218 break;
1219 }
1220 #endif /* INET */
1221 #ifndef INET6
1222 if (rule->af == AF_INET6) {
1223 pool_put(&pf_rule_pl, rule);
1224 error = EAFNOSUPPORT;
1225 break;
1226 }
1227 #endif /* INET6 */
1228 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1229 pf_rulequeue);
1230 if (tail)
1231 rule->nr = tail->nr + 1;
1232 else
1233 rule->nr = 0;
1234 if (rule->ifname[0]) {
1235 rule->kif = pfi_attach_rule(rule->ifname);
1236 if (rule->kif == NULL) {
1237 pool_put(&pf_rule_pl, rule);
1238 error = EINVAL;
1239 break;
1240 }
1241 }
1242
1243 #ifdef ALTQ
1244 /* set queue IDs */
1245 if (rule->qname[0] != 0) {
1246 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1247 error = EBUSY;
1248 else if (rule->pqname[0] != 0) {
1249 if ((rule->pqid =
1250 pf_qname2qid(rule->pqname)) == 0)
1251 error = EBUSY;
1252 } else
1253 rule->pqid = rule->qid;
1254 }
1255 #endif
1256 if (rule->tagname[0])
1257 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1258 error = EBUSY;
1259 if (rule->match_tagname[0])
1260 if ((rule->match_tag =
1261 pf_tagname2tag(rule->match_tagname)) == 0)
1262 error = EBUSY;
1263 if (rule->rt && !rule->direction)
1264 error = EINVAL;
1265 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1266 error = EINVAL;
1267 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1268 error = EINVAL;
1269 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1270 error = EINVAL;
1271 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1272 error = EINVAL;
1273 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1274 error = EINVAL;
1275 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1276 if (pf_tbladdr_setup(ruleset, &pa->addr))
1277 error = EINVAL;
1278
1279 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1280 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1281 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1282 (rule->rt > PF_FASTROUTE)) &&
1283 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1284 error = EINVAL;
1285
1286 if (error) {
1287 pf_rm_rule(NULL, rule);
1288 break;
1289 }
1290 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1291 rule->evaluations = rule->packets = rule->bytes = 0;
1292 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1293 rule, entries);
1294 break;
1295 }
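/*
 * Illustrative userland sketch (not code from this file): adding a
 * rule needs both tickets checked above -- the inactive-ruleset
 * ticket obtained when the transaction was begun, plus a pool ticket
 * from DIOCBEGINADDRS -- roughly:
 *
 *	struct pfioc_pooladdr pp;
 *	struct pfioc_rule pr;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	memset(&pr, 0, sizeof(pr));
 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
 *		err(1, "DIOCBEGINADDRS");
 *	pr.pool_ticket = pp.ticket;
 *	pr.ticket = inactive_ticket;
 *	pr.rule.action = PF_PASS;
 *	strlcpy(pr.rule.ifname, "em0", sizeof(pr.rule.ifname));
 *	if (ioctl(dev, DIOCADDRULE, &pr) == -1)
 *		err(1, "DIOCADDRULE");
 *
 * where dev and inactive_ticket are assumed to come from the open and
 * transaction-begin steps, and "em0" is only a placeholder interface
 * name.
 */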
1296
1297 case DIOCGETRULES: {
1298 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1299 struct pf_ruleset *ruleset;
1300 struct pf_rule *tail;
1301 int rs_num;
1302
1303 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1304 ruleset = pf_find_ruleset(pr->anchor);
1305 if (ruleset == NULL) {
1306 error = EINVAL;
1307 break;
1308 }
1309 rs_num = pf_get_ruleset_number(pr->rule.action);
1310 if (rs_num >= PF_RULESET_MAX) {
1311 error = EINVAL;
1312 break;
1313 }
1314 s = splsoftnet();
1315 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1316 pf_rulequeue);
1317 if (tail)
1318 pr->nr = tail->nr + 1;
1319 else
1320 pr->nr = 0;
1321 pr->ticket = ruleset->rules[rs_num].active.ticket;
1322 splx(s);
1323 break;
1324 }
1325
1326 case DIOCGETRULE: {
1327 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1328 struct pf_ruleset *ruleset;
1329 struct pf_rule *rule;
1330 int rs_num, i;
1331
1332 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1333 ruleset = pf_find_ruleset(pr->anchor);
1334 if (ruleset == NULL) {
1335 error = EINVAL;
1336 break;
1337 }
1338 rs_num = pf_get_ruleset_number(pr->rule.action);
1339 if (rs_num >= PF_RULESET_MAX) {
1340 error = EINVAL;
1341 break;
1342 }
1343 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1344 error = EBUSY;
1345 break;
1346 }
1347 s = splsoftnet();
1348 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1349 while ((rule != NULL) && (rule->nr != pr->nr))
1350 rule = TAILQ_NEXT(rule, entries);
1351 if (rule == NULL) {
1352 error = EBUSY;
1353 splx(s);
1354 break;
1355 }
1356 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1357 if (pf_anchor_copyout(ruleset, rule, pr)) {
1358 error = EBUSY;
1359 splx(s);
1360 break;
1361 }
1362 pfi_dynaddr_copyout(&pr->rule.src.addr);
1363 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1364 pf_tbladdr_copyout(&pr->rule.src.addr);
1365 pf_tbladdr_copyout(&pr->rule.dst.addr);
1366 for (i = 0; i < PF_SKIP_COUNT; ++i)
1367 if (rule->skip[i].ptr == NULL)
1368 pr->rule.skip[i].nr = -1;
1369 else
1370 pr->rule.skip[i].nr =
1371 rule->skip[i].ptr->nr;
1372 splx(s);
1373 break;
1374 }
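/*
 * Illustrative userland sketch (not code from this file): rules are
 * enumerated by asking DIOCGETRULES for a count and ticket, then
 * fetching each rule by index under that ticket, roughly:
 *
 *	struct pfioc_rule pr;
 *	u_int32_t i, n;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.rule.action = PF_PASS;
 *	if (ioctl(dev, DIOCGETRULES, &pr) == -1)
 *		err(1, "DIOCGETRULES");
 *	n = pr.nr;
 *	for (i = 0; i < n; i++) {
 *		pr.nr = i;
 *		if (ioctl(dev, DIOCGETRULE, &pr) == -1)
 *			err(1, "DIOCGETRULE");
 *	}
 *
 * with pr.rule holding the i-th active filter rule after each
 * DIOCGETRULE; setting pr.rule.action selects which of the scrub,
 * filter, nat, binat or rdr rulesets is walked.
 */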
1375
1376 case DIOCCHANGERULE: {
1377 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1378 struct pf_ruleset *ruleset;
1379 struct pf_rule *oldrule = NULL, *newrule = NULL;
1380 u_int32_t nr = 0;
1381 int rs_num;
1382
1383 if (!(pcr->action == PF_CHANGE_REMOVE ||
1384 pcr->action == PF_CHANGE_GET_TICKET) &&
1385 pcr->pool_ticket != ticket_pabuf) {
1386 error = EBUSY;
1387 break;
1388 }
1389
1390 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1391 pcr->action > PF_CHANGE_GET_TICKET) {
1392 error = EINVAL;
1393 break;
1394 }
1395 ruleset = pf_find_ruleset(pcr->anchor);
1396 if (ruleset == NULL) {
1397 error = EINVAL;
1398 break;
1399 }
1400 rs_num = pf_get_ruleset_number(pcr->rule.action);
1401 if (rs_num >= PF_RULESET_MAX) {
1402 error = EINVAL;
1403 break;
1404 }
1405
1406 if (pcr->action == PF_CHANGE_GET_TICKET) {
1407 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1408 break;
1409 } else {
1410 if (pcr->ticket !=
1411 ruleset->rules[rs_num].active.ticket) {
1412 error = EINVAL;
1413 break;
1414 }
1415 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1416 error = EINVAL;
1417 break;
1418 }
1419 }
1420
1421 if (pcr->action != PF_CHANGE_REMOVE) {
1422 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1423 if (newrule == NULL) {
1424 error = ENOMEM;
1425 break;
1426 }
1427 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1428 TAILQ_INIT(&newrule->rpool.list);
1429 /* initialize refcounting */
1430 newrule->states = 0;
1431 newrule->entries.tqe_prev = NULL;
1432 #ifndef INET
1433 if (newrule->af == AF_INET) {
1434 pool_put(&pf_rule_pl, newrule);
1435 error = EAFNOSUPPORT;
1436 break;
1437 }
1438 #endif /* INET */
1439 #ifndef INET6
1440 if (newrule->af == AF_INET6) {
1441 pool_put(&pf_rule_pl, newrule);
1442 error = EAFNOSUPPORT;
1443 break;
1444 }
1445 #endif /* INET6 */
1446 if (newrule->ifname[0]) {
1447 newrule->kif = pfi_attach_rule(newrule->ifname);
1448 if (newrule->kif == NULL) {
1449 pool_put(&pf_rule_pl, newrule);
1450 error = EINVAL;
1451 break;
1452 }
1453 } else
1454 newrule->kif = NULL;
1455
1456 #ifdef ALTQ
1457 /* set queue IDs */
1458 if (newrule->qname[0] != 0) {
1459 if ((newrule->qid =
1460 pf_qname2qid(newrule->qname)) == 0)
1461 error = EBUSY;
1462 else if (newrule->pqname[0] != 0) {
1463 if ((newrule->pqid =
1464 pf_qname2qid(newrule->pqname)) == 0)
1465 error = EBUSY;
1466 } else
1467 newrule->pqid = newrule->qid;
1468 }
1469 #endif /* ALTQ */
1470 if (newrule->tagname[0])
1471 if ((newrule->tag =
1472 pf_tagname2tag(newrule->tagname)) == 0)
1473 error = EBUSY;
1474 if (newrule->match_tagname[0])
1475 if ((newrule->match_tag = pf_tagname2tag(
1476 newrule->match_tagname)) == 0)
1477 error = EBUSY;
1478
1479 if (newrule->rt && !newrule->direction)
1480 error = EINVAL;
1481 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1482 error = EINVAL;
1483 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1484 error = EINVAL;
1485 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1486 error = EINVAL;
1487 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1488 error = EINVAL;
1489 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1490 error = EINVAL;
1491
1492 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1493 if (((((newrule->action == PF_NAT) ||
1494 (newrule->action == PF_RDR) ||
1495 (newrule->action == PF_BINAT) ||
1496 (newrule->rt > PF_FASTROUTE)) &&
1497 !pcr->anchor[0])) &&
1498 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1499 error = EINVAL;
1500
1501 if (error) {
1502 pf_rm_rule(NULL, newrule);
1503 break;
1504 }
1505 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1506 newrule->evaluations = newrule->packets = 0;
1507 newrule->bytes = 0;
1508 }
1509 pf_empty_pool(&pf_pabuf);
1510
1511 s = splsoftnet();
1512
1513 if (pcr->action == PF_CHANGE_ADD_HEAD)
1514 oldrule = TAILQ_FIRST(
1515 ruleset->rules[rs_num].active.ptr);
1516 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1517 oldrule = TAILQ_LAST(
1518 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1519 else {
1520 oldrule = TAILQ_FIRST(
1521 ruleset->rules[rs_num].active.ptr);
1522 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1523 oldrule = TAILQ_NEXT(oldrule, entries);
1524 if (oldrule == NULL) {
1525 if (newrule != NULL)
1526 pf_rm_rule(NULL, newrule);
1527 error = EINVAL;
1528 splx(s);
1529 break;
1530 }
1531 }
1532
1533 if (pcr->action == PF_CHANGE_REMOVE)
1534 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1535 else {
1536 if (oldrule == NULL)
1537 TAILQ_INSERT_TAIL(
1538 ruleset->rules[rs_num].active.ptr,
1539 newrule, entries);
1540 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1541 pcr->action == PF_CHANGE_ADD_BEFORE)
1542 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1543 else
1544 TAILQ_INSERT_AFTER(
1545 ruleset->rules[rs_num].active.ptr,
1546 oldrule, newrule, entries);
1547 }
1548
1549 nr = 0;
1550 TAILQ_FOREACH(oldrule,
1551 ruleset->rules[rs_num].active.ptr, entries)
1552 oldrule->nr = nr++;
1553
1554 ruleset->rules[rs_num].active.ticket++;
1555
1556 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1557 pf_remove_if_empty_ruleset(ruleset);
1558
1559 splx(s);
1560 break;
1561 }
1562
1563 case DIOCCLRSTATES: {
1564 struct pf_state *state;
1565 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1566 int killed = 0;
1567
1568 s = splsoftnet();
1569 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1570 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1571 state->u.s.kif->pfik_name)) {
1572 state->timeout = PFTM_PURGE;
1573 #if NPFSYNC
1574 /* don't send out individual delete messages */
1575 state->sync_flags = PFSTATE_NOSYNC;
1576 #endif
1577 killed++;
1578 }
1579 }
1580 pf_purge_expired_states();
1581 pf_status.states = 0;
1582 psk->psk_af = killed;
1583 #if NPFSYNC
1584 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1585 #endif
1586 splx(s);
1587 break;
1588 }
1589
1590 case DIOCKILLSTATES: {
1591 struct pf_state *state;
1592 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1593 int killed = 0;
1594
1595 s = splsoftnet();
1596 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1597 if ((!psk->psk_af || state->af == psk->psk_af)
1598 && (!psk->psk_proto || psk->psk_proto ==
1599 state->proto) &&
1600 PF_MATCHA(psk->psk_src.neg,
1601 &psk->psk_src.addr.v.a.addr,
1602 &psk->psk_src.addr.v.a.mask,
1603 &state->lan.addr, state->af) &&
1604 PF_MATCHA(psk->psk_dst.neg,
1605 &psk->psk_dst.addr.v.a.addr,
1606 &psk->psk_dst.addr.v.a.mask,
1607 &state->ext.addr, state->af) &&
1608 (psk->psk_src.port_op == 0 ||
1609 pf_match_port(psk->psk_src.port_op,
1610 psk->psk_src.port[0], psk->psk_src.port[1],
1611 state->lan.port)) &&
1612 (psk->psk_dst.port_op == 0 ||
1613 pf_match_port(psk->psk_dst.port_op,
1614 psk->psk_dst.port[0], psk->psk_dst.port[1],
1615 state->ext.port)) &&
1616 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1617 state->u.s.kif->pfik_name))) {
1618 state->timeout = PFTM_PURGE;
1619 killed++;
1620 }
1621 }
1622 pf_purge_expired_states();
1623 splx(s);
1624 psk->psk_af = killed;
1625 break;
1626 }
1627
1628 case DIOCADDSTATE: {
1629 struct pfioc_state *ps = (struct pfioc_state *)addr;
1630 struct pf_state *state;
1631 struct pfi_kif *kif;
1632
1633 if (ps->state.timeout >= PFTM_MAX &&
1634 ps->state.timeout != PFTM_UNTIL_PACKET) {
1635 error = EINVAL;
1636 break;
1637 }
1638 state = pool_get(&pf_state_pl, PR_NOWAIT);
1639 if (state == NULL) {
1640 error = ENOMEM;
1641 break;
1642 }
1643 s = splsoftnet();
1644 kif = pfi_lookup_create(ps->state.u.ifname);
1645 if (kif == NULL) {
1646 pool_put(&pf_state_pl, state);
1647 error = ENOENT;
1648 splx(s);
1649 break;
1650 }
1651 bcopy(&ps->state, state, sizeof(struct pf_state));
1652 bzero(&state->u, sizeof(state->u));
1653 state->rule.ptr = &pf_default_rule;
1654 state->nat_rule.ptr = NULL;
1655 state->anchor.ptr = NULL;
1656 state->rt_kif = NULL;
1657 state->creation = time_second;
1658 state->pfsync_time = 0;
1659 state->packets[0] = state->packets[1] = 0;
1660 state->bytes[0] = state->bytes[1] = 0;
1661
1662 if (pf_insert_state(kif, state)) {
1663 pfi_maybe_destroy(kif);
1664 pool_put(&pf_state_pl, state);
1665 error = ENOMEM;
1666 }
1667 splx(s);
1668 break;
1669 }
1670
1671 case DIOCGETSTATE: {
1672 struct pfioc_state *ps = (struct pfioc_state *)addr;
1673 struct pf_state *state;
1674 u_int32_t nr;
1675
1676 nr = 0;
1677 s = splsoftnet();
1678 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1679 if (nr >= ps->nr)
1680 break;
1681 nr++;
1682 }
1683 if (state == NULL) {
1684 error = EBUSY;
1685 splx(s);
1686 break;
1687 }
1688 bcopy(state, &ps->state, sizeof(struct pf_state));
1689 ps->state.rule.nr = state->rule.ptr->nr;
1690 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1691 -1 : state->nat_rule.ptr->nr;
1692 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1693 -1 : state->anchor.ptr->nr;
1694 splx(s);
1695 ps->state.expire = pf_state_expires(state);
1696 if (ps->state.expire > time_second)
1697 ps->state.expire -= time_second;
1698 else
1699 ps->state.expire = 0;
1700 break;
1701 }
1702
1703 case DIOCGETSTATES: {
1704 struct pfioc_states *ps = (struct pfioc_states *)addr;
1705 struct pf_state *state;
1706 struct pf_state *p, pstore;
1707 struct pfi_kif *kif;
1708 u_int32_t nr = 0;
1709 int space = ps->ps_len;
1710
1711 if (space == 0) {
1712 s = splsoftnet();
1713 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1714 nr += kif->pfik_states;
1715 splx(s);
1716 ps->ps_len = sizeof(struct pf_state) * nr;
1717 return (0);
1718 }
1719
1720 s = splsoftnet();
1721 p = ps->ps_states;
1722 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1723 RB_FOREACH(state, pf_state_tree_ext_gwy,
1724 &kif->pfik_ext_gwy) {
1725 int secs = time_second;
1726
1727 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1728 break;
1729
1730 bcopy(state, &pstore, sizeof(pstore));
1731 strlcpy(pstore.u.ifname, kif->pfik_name,
1732 sizeof(pstore.u.ifname));
1733 pstore.rule.nr = state->rule.ptr->nr;
1734 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1735 NULL) ? -1 : state->nat_rule.ptr->nr;
1736 pstore.anchor.nr = (state->anchor.ptr ==
1737 NULL) ? -1 : state->anchor.ptr->nr;
1738 pstore.creation = secs - pstore.creation;
1739 pstore.expire = pf_state_expires(state);
1740 if (pstore.expire > secs)
1741 pstore.expire -= secs;
1742 else
1743 pstore.expire = 0;
1744 error = copyout(&pstore, p, sizeof(*p));
1745 if (error) {
1746 splx(s);
1747 goto fail;
1748 }
1749 p++;
1750 nr++;
1751 }
1752 ps->ps_len = sizeof(struct pf_state) * nr;
1753 splx(s);
1754 break;
1755 }
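/*
 * Illustrative userland sketch (not code from this file): like most
 * bulk-export pf ioctls, DIOCGETSTATES uses a two-call convention --
 * a first call with ps_len == 0 only reports the space required, a
 * second call with a buffer copies out whole struct pf_state entries
 * and rewrites ps_len to the number of bytes actually filled in:
 *
 *	struct pfioc_states ps;
 *
 *	memset(&ps, 0, sizeof(ps));
 *	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
 *		err(1, "DIOCGETSTATES");
 *	if ((ps.ps_buf = malloc(ps.ps_len)) == NULL)
 *		err(1, "malloc");
 *	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
 *		err(1, "DIOCGETSTATES");
 *
 * leaving ps.ps_len / sizeof(struct pf_state) valid entries in the
 * buffer (the count can change between the two calls, so callers
 * typically retry with a larger buffer if it turns out too small).
 */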
1756
1757 case DIOCGETSTATUS: {
1758 struct pf_status *s = (struct pf_status *)addr;
1759 bcopy(&pf_status, s, sizeof(struct pf_status));
1760 pfi_fill_oldstatus(s);
1761 break;
1762 }
1763
1764 case DIOCSETSTATUSIF: {
1765 struct pfioc_if *pi = (struct pfioc_if *)addr;
1766
1767 if (pi->ifname[0] == 0) {
1768 bzero(pf_status.ifname, IFNAMSIZ);
1769 break;
1770 }
1771 if (ifunit(pi->ifname) == NULL) {
1772 error = EINVAL;
1773 break;
1774 }
1775 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1776 break;
1777 }
1778
1779 case DIOCCLRSTATUS: {
1780 bzero(pf_status.counters, sizeof(pf_status.counters));
1781 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1782 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1783 if (*pf_status.ifname)
1784 pfi_clr_istats(pf_status.ifname, NULL,
1785 PFI_FLAG_INSTANCE);
1786 break;
1787 }
1788
1789 case DIOCNATLOOK: {
1790 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1791 struct pf_state *state;
1792 struct pf_state key;
1793 int m = 0, direction = pnl->direction;
1794
1795 key.af = pnl->af;
1796 key.proto = pnl->proto;
1797
1798 if (!pnl->proto ||
1799 PF_AZERO(&pnl->saddr, pnl->af) ||
1800 PF_AZERO(&pnl->daddr, pnl->af) ||
1801 !pnl->dport || !pnl->sport)
1802 error = EINVAL;
1803 else {
1804 s = splsoftnet();
1805
1806 /*
1807 * userland gives us the source and destination of the
1808 * connection; reverse the lookup so we ask what happens
1809 * to the return traffic, which lets us find it in the
1810 * state tree.
1811 */
1812 if (direction == PF_IN) {
1813 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1814 key.ext.port = pnl->dport;
1815 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1816 key.gwy.port = pnl->sport;
1817 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1818 } else {
1819 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1820 key.lan.port = pnl->dport;
1821 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1822 key.ext.port = pnl->sport;
1823 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1824 }
1825 if (m > 1)
1826 error = E2BIG; /* more than one state */
1827 else if (state != NULL) {
1828 if (direction == PF_IN) {
1829 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1830 state->af);
1831 pnl->rsport = state->lan.port;
1832 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1833 pnl->af);
1834 pnl->rdport = pnl->dport;
1835 } else {
1836 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1837 state->af);
1838 pnl->rdport = state->gwy.port;
1839 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1840 pnl->af);
1841 pnl->rsport = pnl->sport;
1842 }
1843 } else
1844 error = ENOENT;
1845 splx(s);
1846 }
1847 break;
1848 }
1849
1850 case DIOCSETTIMEOUT: {
1851 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1852 int old;
1853
1854 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1855 pt->seconds < 0) {
1856 error = EINVAL;
1857 goto fail;
1858 }
1859 old = pf_default_rule.timeout[pt->timeout];
1860 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1861 pt->seconds = old;
1862 break;
1863 }
1864
1865 case DIOCGETTIMEOUT: {
1866 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1867
1868 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1869 error = EINVAL;
1870 goto fail;
1871 }
1872 pt->seconds = pf_default_rule.timeout[pt->timeout];
1873 break;
1874 }
1875
1876 case DIOCGETLIMIT: {
1877 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1878
1879 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1880 error = EINVAL;
1881 goto fail;
1882 }
1883 pl->limit = pf_pool_limits[pl->index].limit;
1884 break;
1885 }
1886
1887 case DIOCSETLIMIT: {
1888 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1889 int old_limit;
1890
1891 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1892 pf_pool_limits[pl->index].pp == NULL) {
1893 error = EINVAL;
1894 goto fail;
1895 }
1896 #ifdef __OpenBSD__
1897 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1898 pl->limit, NULL, 0) != 0) {
1899 error = EBUSY;
1900 goto fail;
1901 }
1902 #else
1903 pool_sethardlimit(pf_pool_limits[pl->index].pp,
1904 pl->limit, NULL, 0);
1905 #endif
1906 old_limit = pf_pool_limits[pl->index].limit;
1907 pf_pool_limits[pl->index].limit = pl->limit;
1908 pl->limit = old_limit;
1909 break;
1910 }
1911
1912 case DIOCSETDEBUG: {
1913 u_int32_t *level = (u_int32_t *)addr;
1914
1915 pf_status.debug = *level;
1916 break;
1917 }
1918
1919 case DIOCCLRRULECTRS: {
1920 struct pf_ruleset *ruleset = &pf_main_ruleset;
1921 struct pf_rule *rule;
1922
1923 s = splsoftnet();
1924 TAILQ_FOREACH(rule,
1925 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
1926 rule->evaluations = rule->packets =
1927 rule->bytes = 0;
1928 splx(s);
1929 break;
1930 }
1931
1932 #ifdef ALTQ
1933 case DIOCSTARTALTQ: {
1934 struct pf_altq *altq;
1935
1936 /* enable all altq interfaces on active list */
1937 s = splsoftnet();
1938 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1939 if (altq->qname[0] == 0) {
1940 error = pf_enable_altq(altq);
1941 if (error != 0)
1942 break;
1943 }
1944 }
1945 if (error == 0)
1946 pf_altq_running = 1;
1947 splx(s);
1948 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1949 break;
1950 }
1951
1952 case DIOCSTOPALTQ: {
1953 struct pf_altq *altq;
1954
1955 /* disable all altq interfaces on active list */
1956 s = splsoftnet();
1957 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1958 if (altq->qname[0] == 0) {
1959 error = pf_disable_altq(altq);
1960 if (error != 0)
1961 break;
1962 }
1963 }
1964 if (error == 0)
1965 pf_altq_running = 0;
1966 splx(s);
1967 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1968 break;
1969 }
1970
1971 case DIOCADDALTQ: {
1972 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1973 struct pf_altq *altq, *a;
1974
1975 if (pa->ticket != ticket_altqs_inactive) {
1976 error = EBUSY;
1977 break;
1978 }
1979 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1980 if (altq == NULL) {
1981 error = ENOMEM;
1982 break;
1983 }
1984 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1985
1986 /*
1987 * if this is for a queue, find the discipline and
1988 * copy the necessary fields
1989 */
1990 if (altq->qname[0] != 0) {
1991 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1992 error = EBUSY;
1993 pool_put(&pf_altq_pl, altq);
1994 break;
1995 }
1996 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1997 if (strncmp(a->ifname, altq->ifname,
1998 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1999 altq->altq_disc = a->altq_disc;
2000 break;
2001 }
2002 }
2003 }
2004
2005 error = altq_add(altq);
2006 if (error) {
2007 pool_put(&pf_altq_pl, altq);
2008 break;
2009 }
2010
2011 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2012 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2013 break;
2014 }
2015
2016 case DIOCGETALTQS: {
2017 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2018 struct pf_altq *altq;
2019
2020 pa->nr = 0;
2021 s = splsoftnet();
2022 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2023 pa->nr++;
2024 pa->ticket = ticket_altqs_active;
2025 splx(s);
2026 break;
2027 }
2028
2029 case DIOCGETALTQ: {
2030 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2031 struct pf_altq *altq;
2032 u_int32_t nr;
2033
2034 if (pa->ticket != ticket_altqs_active) {
2035 error = EBUSY;
2036 break;
2037 }
2038 nr = 0;
2039 s = splsoftnet();
2040 altq = TAILQ_FIRST(pf_altqs_active);
2041 while ((altq != NULL) && (nr < pa->nr)) {
2042 altq = TAILQ_NEXT(altq, entries);
2043 nr++;
2044 }
2045 if (altq == NULL) {
2046 error = EBUSY;
2047 splx(s);
2048 break;
2049 }
2050 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2051 splx(s);
2052 break;
2053 }
2054
2055 case DIOCCHANGEALTQ:
2056 /* CHANGEALTQ not supported yet! */
2057 error = ENODEV;
2058 break;
2059
2060 case DIOCGETQSTATS: {
2061 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2062 struct pf_altq *altq;
2063 u_int32_t nr;
2064 int nbytes;
2065
2066 if (pq->ticket != ticket_altqs_active) {
2067 error = EBUSY;
2068 break;
2069 }
2070 nbytes = pq->nbytes;
2071 nr = 0;
2072 s = splsoftnet();
2073 altq = TAILQ_FIRST(pf_altqs_active);
2074 while ((altq != NULL) && (nr < pq->nr)) {
2075 altq = TAILQ_NEXT(altq, entries);
2076 nr++;
2077 }
2078 if (altq == NULL) {
2079 error = EBUSY;
2080 splx(s);
2081 break;
2082 }
2083 error = altq_getqstats(altq, pq->buf, &nbytes);
2084 splx(s);
2085 if (error == 0) {
2086 pq->scheduler = altq->scheduler;
2087 pq->nbytes = nbytes;
2088 }
2089 break;
2090 }
2091 #endif /* ALTQ */
2092
2093 case DIOCBEGINADDRS: {
2094 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2095
2096 pf_empty_pool(&pf_pabuf);
2097 pp->ticket = ++ticket_pabuf;
2098 break;
2099 }
2100
2101 case DIOCADDADDR: {
2102 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2103
2104 #ifndef INET
2105 if (pp->af == AF_INET) {
2106 error = EAFNOSUPPORT;
2107 break;
2108 }
2109 #endif /* INET */
2110 #ifndef INET6
2111 if (pp->af == AF_INET6) {
2112 error = EAFNOSUPPORT;
2113 break;
2114 }
2115 #endif /* INET6 */
2116 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2117 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2118 pp->addr.addr.type != PF_ADDR_TABLE) {
2119 error = EINVAL;
2120 break;
2121 }
2122 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2123 if (pa == NULL) {
2124 error = ENOMEM;
2125 break;
2126 }
2127 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2128 if (pa->ifname[0]) {
2129 pa->kif = pfi_attach_rule(pa->ifname);
2130 if (pa->kif == NULL) {
2131 pool_put(&pf_pooladdr_pl, pa);
2132 error = EINVAL;
2133 break;
2134 }
2135 }
2136 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2137 pfi_dynaddr_remove(&pa->addr);
2138 pfi_detach_rule(pa->kif);
2139 pool_put(&pf_pooladdr_pl, pa);
2140 error = EINVAL;
2141 break;
2142 }
2143 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2144 break;
2145 }
2146
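	/*
	 * DIOCGETADDRS counts the addresses in the pool identified by
	 * anchor/ticket/rule action/rule number; DIOCGETADDR copies out
	 * the pp->nr'th entry, expanding dynamic and table addresses for
	 * userland.
	 */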
2147 case DIOCGETADDRS: {
2148 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2149
2150 pp->nr = 0;
2151 s = splsoftnet();
2152 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2153 pp->r_num, 0, 1, 0);
2154 if (pool == NULL) {
2155 error = EBUSY;
2156 splx(s);
2157 break;
2158 }
2159 TAILQ_FOREACH(pa, &pool->list, entries)
2160 pp->nr++;
2161 splx(s);
2162 break;
2163 }
2164
2165 case DIOCGETADDR: {
2166 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2167 u_int32_t nr = 0;
2168
2169 s = splsoftnet();
2170 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2171 pp->r_num, 0, 1, 1);
2172 if (pool == NULL) {
2173 error = EBUSY;
2174 splx(s);
2175 break;
2176 }
2177 pa = TAILQ_FIRST(&pool->list);
2178 while ((pa != NULL) && (nr < pp->nr)) {
2179 pa = TAILQ_NEXT(pa, entries);
2180 nr++;
2181 }
2182 if (pa == NULL) {
2183 error = EBUSY;
2184 splx(s);
2185 break;
2186 }
2187 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2188 pfi_dynaddr_copyout(&pp->addr.addr);
2189 pf_tbladdr_copyout(&pp->addr.addr);
2190 splx(s);
2191 break;
2192 }
2193
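	/*
	 * DIOCCHANGEADDR edits a pool in place: validate the action and
	 * address type, construct the replacement entry (interface,
	 * dynamic address and table setup), then at splsoftnet locate the
	 * old entry by position and insert before/after it or remove it.
	 * pool->cur and pool->counter are then re-initialised from the
	 * head of the list.
	 */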
2194 case DIOCCHANGEADDR: {
2195 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2196 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2197 struct pf_ruleset *ruleset;
2198
2199 if (pca->action < PF_CHANGE_ADD_HEAD ||
2200 pca->action > PF_CHANGE_REMOVE) {
2201 error = EINVAL;
2202 break;
2203 }
2204 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2205 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2206 pca->addr.addr.type != PF_ADDR_TABLE) {
2207 error = EINVAL;
2208 break;
2209 }
2210
2211 ruleset = pf_find_ruleset(pca->anchor);
2212 if (ruleset == NULL) {
2213 error = EBUSY;
2214 break;
2215 }
2216 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2217 pca->r_num, pca->r_last, 1, 1);
2218 if (pool == NULL) {
2219 error = EBUSY;
2220 break;
2221 }
2222 if (pca->action != PF_CHANGE_REMOVE) {
2223 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2224 if (newpa == NULL) {
2225 error = ENOMEM;
2226 break;
2227 }
2228 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2229 #ifndef INET
2230 if (pca->af == AF_INET) {
2231 pool_put(&pf_pooladdr_pl, newpa);
2232 error = EAFNOSUPPORT;
2233 break;
2234 }
2235 #endif /* INET */
2236 #ifndef INET6
2237 if (pca->af == AF_INET6) {
2238 pool_put(&pf_pooladdr_pl, newpa);
2239 error = EAFNOSUPPORT;
2240 break;
2241 }
2242 #endif /* INET6 */
2243 if (newpa->ifname[0]) {
2244 newpa->kif = pfi_attach_rule(newpa->ifname);
2245 if (newpa->kif == NULL) {
2246 pool_put(&pf_pooladdr_pl, newpa);
2247 error = EINVAL;
2248 break;
2249 }
2250 } else
2251 newpa->kif = NULL;
2252 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2253 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2254 pfi_dynaddr_remove(&newpa->addr);
2255 pfi_detach_rule(newpa->kif);
2256 pool_put(&pf_pooladdr_pl, newpa);
2257 error = EINVAL;
2258 break;
2259 }
2260 }
2261
2262 s = splsoftnet();
2263
2264 if (pca->action == PF_CHANGE_ADD_HEAD)
2265 oldpa = TAILQ_FIRST(&pool->list);
2266 else if (pca->action == PF_CHANGE_ADD_TAIL)
2267 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2268 else {
2269 int i = 0;
2270
2271 oldpa = TAILQ_FIRST(&pool->list);
2272 while ((oldpa != NULL) && (i < pca->nr)) {
2273 oldpa = TAILQ_NEXT(oldpa, entries);
2274 i++;
2275 }
2276 if (oldpa == NULL) {
2277 error = EINVAL;
2278 splx(s);
2279 break;
2280 }
2281 }
2282
2283 if (pca->action == PF_CHANGE_REMOVE) {
2284 TAILQ_REMOVE(&pool->list, oldpa, entries);
2285 pfi_dynaddr_remove(&oldpa->addr);
2286 pf_tbladdr_remove(&oldpa->addr);
2287 pfi_detach_rule(oldpa->kif);
2288 pool_put(&pf_pooladdr_pl, oldpa);
2289 } else {
2290 if (oldpa == NULL)
2291 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2292 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2293 pca->action == PF_CHANGE_ADD_BEFORE)
2294 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2295 else
2296 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2297 newpa, entries);
2298 }
2299
2300 pool->cur = TAILQ_FIRST(&pool->list);
2301 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2302 pca->af);
2303 splx(s);
2304 break;
2305 }
2306
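	/*
	 * Anchor enumeration: DIOCGETRULESETS counts the child anchors of
	 * the given path (for the main ruleset this means all top-level
	 * anchors); DIOCGETRULESET returns the name of the pr->nr'th
	 * child.
	 */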
2307 case DIOCGETRULESETS: {
2308 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2309 struct pf_ruleset *ruleset;
2310 struct pf_anchor *anchor;
2311
2312 pr->path[sizeof(pr->path) - 1] = 0;
2313 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2314 error = EINVAL;
2315 break;
2316 }
2317 pr->nr = 0;
2318 if (ruleset->anchor == NULL) {
2319 /* XXX kludge for pf_main_ruleset */
2320 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2321 if (anchor->parent == NULL)
2322 pr->nr++;
2323 } else {
2324 RB_FOREACH(anchor, pf_anchor_node,
2325 &ruleset->anchor->children)
2326 pr->nr++;
2327 }
2328 break;
2329 }
2330
2331 case DIOCGETRULESET: {
2332 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2333 struct pf_ruleset *ruleset;
2334 struct pf_anchor *anchor;
2335 u_int32_t nr = 0;
2336
2337 pr->path[sizeof(pr->path) - 1] = 0;
2338 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2339 error = EINVAL;
2340 break;
2341 }
2342 pr->name[0] = 0;
2343 if (ruleset->anchor == NULL) {
2344 /* XXX kludge for pf_main_ruleset */
2345 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2346 if (anchor->parent == NULL && nr++ == pr->nr) {
2347 strlcpy(pr->name, anchor->name,
2348 sizeof(pr->name));
2349 break;
2350 }
2351 } else {
2352 RB_FOREACH(anchor, pf_anchor_node,
2353 &ruleset->anchor->children)
2354 if (nr++ == pr->nr) {
2355 strlcpy(pr->name, anchor->name,
2356 sizeof(pr->name));
2357 break;
2358 }
2359 }
2360 if (!pr->name[0])
2361 error = EBUSY;
2362 break;
2363 }
2364
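	/*
	 * The DIOCR* table ioctls below all follow the same pattern:
	 * pfrio_esize is checked against the expected element size as a
	 * cheap ABI sanity check, then the work is delegated to the
	 * pfr_* table routines with PFR_FLAG_USERIOCTL set to mark the
	 * request as user-initiated.
	 */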
2365 case DIOCRCLRTABLES: {
2366 struct pfioc_table *io = (struct pfioc_table *)addr;
2367
2368 if (io->pfrio_esize != 0) {
2369 error = ENODEV;
2370 break;
2371 }
2372 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2373 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2374 break;
2375 }
2376
2377 case DIOCRADDTABLES: {
2378 struct pfioc_table *io = (struct pfioc_table *)addr;
2379
2380 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2381 error = ENODEV;
2382 break;
2383 }
2384 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2385 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2386 break;
2387 }
2388
2389 case DIOCRDELTABLES: {
2390 struct pfioc_table *io = (struct pfioc_table *)addr;
2391
2392 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2393 error = ENODEV;
2394 break;
2395 }
2396 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2397 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2398 break;
2399 }
2400
2401 case DIOCRGETTABLES: {
2402 struct pfioc_table *io = (struct pfioc_table *)addr;
2403
2404 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2405 error = ENODEV;
2406 break;
2407 }
2408 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2409 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2410 break;
2411 }
2412
2413 case DIOCRGETTSTATS: {
2414 struct pfioc_table *io = (struct pfioc_table *)addr;
2415
2416 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2417 error = ENODEV;
2418 break;
2419 }
2420 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2421 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2422 break;
2423 }
2424
2425 case DIOCRCLRTSTATS: {
2426 struct pfioc_table *io = (struct pfioc_table *)addr;
2427
2428 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2429 error = ENODEV;
2430 break;
2431 }
2432 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2433 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2434 break;
2435 }
2436
2437 case DIOCRSETTFLAGS: {
2438 struct pfioc_table *io = (struct pfioc_table *)addr;
2439
2440 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2441 error = ENODEV;
2442 break;
2443 }
2444 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2445 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2446 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2447 break;
2448 }
2449
2450 case DIOCRCLRADDRS: {
2451 struct pfioc_table *io = (struct pfioc_table *)addr;
2452
2453 if (io->pfrio_esize != 0) {
2454 error = ENODEV;
2455 break;
2456 }
2457 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2458 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2459 break;
2460 }
2461
2462 case DIOCRADDADDRS: {
2463 struct pfioc_table *io = (struct pfioc_table *)addr;
2464
2465 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2466 error = ENODEV;
2467 break;
2468 }
2469 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2470 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2471 PFR_FLAG_USERIOCTL);
2472 break;
2473 }
2474
2475 case DIOCRDELADDRS: {
2476 struct pfioc_table *io = (struct pfioc_table *)addr;
2477
2478 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2479 error = ENODEV;
2480 break;
2481 }
2482 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2483 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2484 PFR_FLAG_USERIOCTL);
2485 break;
2486 }
2487
2488 case DIOCRSETADDRS: {
2489 struct pfioc_table *io = (struct pfioc_table *)addr;
2490
2491 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2492 error = ENODEV;
2493 break;
2494 }
2495 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2496 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2497 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2498 PFR_FLAG_USERIOCTL);
2499 break;
2500 }
2501
2502 case DIOCRGETADDRS: {
2503 struct pfioc_table *io = (struct pfioc_table *)addr;
2504
2505 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2506 error = ENODEV;
2507 break;
2508 }
2509 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2510 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2511 break;
2512 }
2513
2514 case DIOCRGETASTATS: {
2515 struct pfioc_table *io = (struct pfioc_table *)addr;
2516
2517 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2518 error = ENODEV;
2519 break;
2520 }
2521 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2522 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2523 break;
2524 }
2525
2526 case DIOCRCLRASTATS: {
2527 struct pfioc_table *io = (struct pfioc_table *)addr;
2528
2529 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2530 error = ENODEV;
2531 break;
2532 }
2533 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2534 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2535 PFR_FLAG_USERIOCTL);
2536 break;
2537 }
2538
2539 case DIOCRTSTADDRS: {
2540 struct pfioc_table *io = (struct pfioc_table *)addr;
2541
2542 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2543 error = ENODEV;
2544 break;
2545 }
2546 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2547 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2548 PFR_FLAG_USERIOCTL);
2549 break;
2550 }
2551
2552 case DIOCRINADEFINE: {
2553 struct pfioc_table *io = (struct pfioc_table *)addr;
2554
2555 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2556 error = ENODEV;
2557 break;
2558 }
2559 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2560 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2561 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2562 break;
2563 }
2564
2565 case DIOCOSFPADD: {
2566 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2567 s = splsoftnet();
2568 error = pf_osfp_add(io);
2569 splx(s);
2570 break;
2571 }
2572
2573 case DIOCOSFPGET: {
2574 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2575 s = splsoftnet();
2576 error = pf_osfp_get(io);
2577 splx(s);
2578 break;
2579 }
2580
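	/*
	 * Transaction ioctls.  io->array holds one pfioc_trans_e per
	 * ruleset, table or altq set to be replaced.  DIOCXBEGIN opens an
	 * inactive copy for each element and records a ticket in it;
	 * DIOCXROLLBACK throws the inactive copies away; DIOCXCOMMIT
	 * re-checks every ticket before swapping anything in, so a
	 * half-applied transaction is avoided in the normal case.  A rough
	 * sketch of the expected userland sequence (not taken from this
	 * file):
	 *
	 *	ioctl(dev, DIOCXBEGIN, &io);	// get tickets
	 *	// ...load rules/addresses/altqs against those tickets...
	 *	ioctl(dev, DIOCXCOMMIT, &io);	// or DIOCXROLLBACK on error
	 *
	 * Note that ioe and table are static to keep the large structures
	 * off the kernel stack, which appears to rely on the ioctl path
	 * being serialized.
	 */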
2581 case DIOCXBEGIN: {
2582 struct pfioc_trans *io = (struct pfioc_trans *)
2583 addr;
2584 static struct pfioc_trans_e ioe;
2585 static struct pfr_table table;
2586 int i;
2587
2588 if (io->esize != sizeof(ioe)) {
2589 error = ENODEV;
2590 goto fail;
2591 }
2592 for (i = 0; i < io->size; i++) {
2593 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2594 error = EFAULT;
2595 goto fail;
2596 }
2597 switch (ioe.rs_num) {
2598 #ifdef ALTQ
2599 case PF_RULESET_ALTQ:
2600 if (ioe.anchor[0]) {
2601 error = EINVAL;
2602 goto fail;
2603 }
2604 if ((error = pf_begin_altq(&ioe.ticket)))
2605 goto fail;
2606 break;
2607 #endif /* ALTQ */
2608 case PF_RULESET_TABLE:
2609 bzero(&table, sizeof(table));
2610 strlcpy(table.pfrt_anchor, ioe.anchor,
2611 sizeof(table.pfrt_anchor));
2612 if ((error = pfr_ina_begin(&table,
2613 &ioe.ticket, NULL, 0)))
2614 goto fail;
2615 break;
2616 default:
2617 if ((error = pf_begin_rules(&ioe.ticket,
2618 ioe.rs_num, ioe.anchor)))
2619 goto fail;
2620 break;
2621 }
2622 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2623 error = EFAULT;
2624 goto fail;
2625 }
2626 }
2627 break;
2628 }
2629
2630 case DIOCXROLLBACK: {
2631 struct pfioc_trans *io = (struct pfioc_trans *)
2632 addr;
2633 static struct pfioc_trans_e ioe;
2634 static struct pfr_table table;
2635 int i;
2636
2637 if (io->esize != sizeof(ioe)) {
2638 error = ENODEV;
2639 goto fail;
2640 }
2641 for (i = 0; i < io->size; i++) {
2642 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2643 error = EFAULT;
2644 goto fail;
2645 }
2646 switch (ioe.rs_num) {
2647 #ifdef ALTQ
2648 case PF_RULESET_ALTQ:
2649 if (ioe.anchor[0]) {
2650 error = EINVAL;
2651 goto fail;
2652 }
2653 if ((error = pf_rollback_altq(ioe.ticket)))
2654 goto fail; /* really bad */
2655 break;
2656 #endif /* ALTQ */
2657 case PF_RULESET_TABLE:
2658 bzero(&table, sizeof(table));
2659 strlcpy(table.pfrt_anchor, ioe.anchor,
2660 sizeof(table.pfrt_anchor));
2661 if ((error = pfr_ina_rollback(&table,
2662 ioe.ticket, NULL, 0)))
2663 goto fail; /* really bad */
2664 break;
2665 default:
2666 if ((error = pf_rollback_rules(ioe.ticket,
2667 ioe.rs_num, ioe.anchor)))
2668 goto fail; /* really bad */
2669 break;
2670 }
2671 }
2672 break;
2673 }
2674
2675 case DIOCXCOMMIT: {
2676 struct pfioc_trans *io = (struct pfioc_trans *)
2677 addr;
2678 static struct pfioc_trans_e ioe;
2679 static struct pfr_table table;
2680 struct pf_ruleset *rs;
2681 int i;
2682
2683 if (io->esize != sizeof(ioe)) {
2684 error = ENODEV;
2685 goto fail;
2686 }
 2687 		/* first make sure everything will succeed */
2688 for (i = 0; i < io->size; i++) {
2689 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2690 error = EFAULT;
2691 goto fail;
2692 }
2693 switch (ioe.rs_num) {
2694 #ifdef ALTQ
2695 case PF_RULESET_ALTQ:
2696 if (ioe.anchor[0]) {
2697 error = EINVAL;
2698 goto fail;
2699 }
2700 if (!altqs_inactive_open || ioe.ticket !=
2701 ticket_altqs_inactive) {
2702 error = EBUSY;
2703 goto fail;
2704 }
2705 break;
2706 #endif /* ALTQ */
2707 case PF_RULESET_TABLE:
2708 rs = pf_find_ruleset(ioe.anchor);
2709 if (rs == NULL || !rs->topen || ioe.ticket !=
2710 rs->tticket) {
2711 error = EBUSY;
2712 goto fail;
2713 }
2714 break;
2715 default:
2716 if (ioe.rs_num < 0 || ioe.rs_num >=
2717 PF_RULESET_MAX) {
2718 error = EINVAL;
2719 goto fail;
2720 }
2721 rs = pf_find_ruleset(ioe.anchor);
2722 if (rs == NULL ||
2723 !rs->rules[ioe.rs_num].inactive.open ||
2724 rs->rules[ioe.rs_num].inactive.ticket !=
2725 ioe.ticket) {
2726 error = EBUSY;
2727 goto fail;
2728 }
2729 break;
2730 }
2731 }
2732 /* now do the commit - no errors should happen here */
2733 for (i = 0; i < io->size; i++) {
2734 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2735 error = EFAULT;
2736 goto fail;
2737 }
2738 switch (ioe.rs_num) {
2739 #ifdef ALTQ
2740 case PF_RULESET_ALTQ:
2741 if ((error = pf_commit_altq(ioe.ticket)))
2742 goto fail; /* really bad */
2743 break;
2744 #endif /* ALTQ */
2745 case PF_RULESET_TABLE:
2746 bzero(&table, sizeof(table));
2747 strlcpy(table.pfrt_anchor, ioe.anchor,
2748 sizeof(table.pfrt_anchor));
2749 if ((error = pfr_ina_commit(&table, ioe.ticket,
2750 NULL, NULL, 0)))
2751 goto fail; /* really bad */
2752 break;
2753 default:
2754 if ((error = pf_commit_rules(ioe.ticket,
2755 ioe.rs_num, ioe.anchor)))
2756 goto fail; /* really bad */
2757 break;
2758 }
2759 }
2760 break;
2761 }
2762
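	/*
	 * DIOCGETSRCNODES uses a two-step protocol: a call with
	 * psn_len == 0 only returns the buffer size needed for all source
	 * nodes; otherwise entries are copied out one by one until the
	 * buffer is full, with creation/expire converted to relative
	 * seconds and the rule pointer flattened to a rule number.
	 */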
2763 case DIOCGETSRCNODES: {
2764 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2765 struct pf_src_node *n;
2766 struct pf_src_node *p, pstore;
2767 u_int32_t nr = 0;
2768 int space = psn->psn_len;
2769
2770 if (space == 0) {
2771 s = splsoftnet();
2772 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2773 nr++;
2774 splx(s);
2775 psn->psn_len = sizeof(struct pf_src_node) * nr;
2776 return (0);
2777 }
2778
2779 s = splsoftnet();
2780 p = psn->psn_src_nodes;
2781 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2782 int secs = time_second;
2783
2784 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2785 break;
2786
2787 bcopy(n, &pstore, sizeof(pstore));
2788 if (n->rule.ptr != NULL)
2789 pstore.rule.nr = n->rule.ptr->nr;
2790 pstore.creation = secs - pstore.creation;
2791 if (pstore.expire > secs)
2792 pstore.expire -= secs;
2793 else
2794 pstore.expire = 0;
2795 error = copyout(&pstore, p, sizeof(*p));
2796 if (error) {
2797 splx(s);
2798 goto fail;
2799 }
2800 p++;
2801 nr++;
2802 }
2803 psn->psn_len = sizeof(struct pf_src_node) * nr;
2804 splx(s);
2805 break;
2806 }
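	/*
	 * DIOCCLRSRCNODES detaches every state from its source nodes,
	 * marks all source nodes as expired with no remaining states, and
	 * purges them immediately.
	 */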
2807
2808 case DIOCCLRSRCNODES: {
2809 struct pf_src_node *n;
2810 struct pf_state *state;
2811
2812 s = splsoftnet();
2813 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2814 state->src_node = NULL;
2815 state->nat_src_node = NULL;
2816 }
2817 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2818 n->expire = 1;
2819 n->states = 0;
2820 }
2821 pf_purge_expired_src_nodes();
2822 pf_status.src_nodes = 0;
2823 splx(s);
2824 break;
2825 }
2826
2827 case DIOCSETHOSTID: {
2828 u_int32_t *hostid = (u_int32_t *)addr;
2829
2830 if (*hostid == 0) {
2831 error = EINVAL;
2832 goto fail;
2833 }
2834 pf_status.hostid = *hostid;
2835 break;
2836 }
2837
2838 case DIOCOSFPFLUSH:
2839 s = splsoftnet();
2840 pf_osfp_flush();
2841 splx(s);
2842 break;
2843
2844 case DIOCIGETIFACES: {
2845 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2846
2847 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2848 error = ENODEV;
2849 break;
2850 }
2851 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2852 &io->pfiio_size, io->pfiio_flags);
2853 break;
2854 }
2855
2856 case DIOCICLRISTATS: {
2857 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2858
2859 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2860 io->pfiio_flags);
2861 break;
2862 }
2863
2864 default:
2865 error = ENODEV;
2866 break;
2867 }
2868 fail:
2869
2870 return (error);
2871 }
2872
2873 #ifdef __NetBSD__
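/*
 * NetBSD pfil(9) glue: these wrappers feed packets from the pfil hooks
 * into pf_test()/pf_test6().  A blocked packet is freed here and
 * EHOSTUNREACH is returned so the caller does not touch the mbuf again.
 */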
2874 int
2875 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2876 {
2877 int error;
2878
 2879 	/*
 2880 	 * Ensure that the mbuf is writable before handing it to pf,
 2881 	 * which assumes it may modify the packet in place.
 2882 	 * Max ip header (60 bytes) + max tcp header (60 bytes) should
 2883 	 * be enough.  XXX inefficient
 2884 	 */
2885 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2886 if (error) {
2887 m_freem(*mp);
2888 *mp = NULL;
2889 return error;
2890 }
2891
 2892 	/*
 2893 	 * If the packet is outbound, any delayed checksums must be
 2894 	 * computed here; they cannot be postponed past pf.  For inbound
 2895 	 * packets the checksum has already been validated.
 2896 	 */
2897 if (dir == PFIL_OUT) {
2898 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2899 in_delayed_cksum(*mp);
2900 (*mp)->m_pkthdr.csum_flags &=
2901 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2902 }
2903 }
2904
2905 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2906 != PF_PASS) {
2907 m_freem(*mp);
2908 *mp = NULL;
2909 return EHOSTUNREACH;
2910 } else
2911 return (0);
2912 }
2913
2914 #ifdef INET6
2915 int
2916 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2917 {
2918 int error;
2919
 2920 	/*
 2921 	 * Ensure that the mbuf is writable before handing it to pf,
 2922 	 * which assumes it may modify the packet in place.
 2923 	 * XXX inefficient
 2924 	 */
2925 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
2926 if (error) {
2927 m_freem(*mp);
2928 *mp = NULL;
2929 return error;
2930 }
2931
2932 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2933 != PF_PASS) {
2934 m_freem(*mp);
2935 *mp = NULL;
2936 return EHOSTUNREACH;
2937 } else
2938 return (0);
2939 }
2940 #endif
2941
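/*
 * For PFIL_IFNET hooks the command is passed in place of the mbuf
 * pointer, hence the cast below.  Interface attach/detach events keep
 * pf's per-interface (kif) bookkeeping in sync.
 */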
2942 int
2943 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2944 {
2945 u_long cmd = (u_long)mp;
2946
2947 switch (cmd) {
2948 case PFIL_IFNET_ATTACH:
2949 pfi_attach_ifnet(ifp);
2950 break;
2951 case PFIL_IFNET_DETACH:
2952 pfi_detach_ifnet(ifp);
2953 break;
2954 }
2955
2956 return (0);
2957 }
2958
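/*
 * Likewise, PFIL_IFADDR hooks pass the address-configuration ioctl
 * command in place of the mbuf pointer.  Any address change triggers a
 * refresh of pf's cached view of the interface, which is presumably
 * what dynamic interface addresses depend on.
 */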
2959 int
2960 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2961 {
2962 extern void pfi_kifaddr_update_if(struct ifnet *);
2963
2964 u_long cmd = (u_long)mp;
2965
2966 switch (cmd) {
2967 case SIOCSIFADDR:
2968 case SIOCAIFADDR:
2969 case SIOCDIFADDR:
2970 #ifdef INET6
2971 case SIOCAIFADDR_IN6:
2972 case SIOCDIFADDR_IN6:
2973 #endif
2974 pfi_kifaddr_update_if(ifp);
2975 break;
2976 default:
2977 panic("unexpected ioctl");
2978 }
2979
2980 return (0);
2981 }
2982
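/*
 * pf_pfil_attach() registers all hooks: interface attach/detach,
 * interface address changes, and the per-address-family input/output
 * hooks, then attaches every interface that already exists.  Failures
 * unwind the registrations in reverse order via the bad* labels.
 */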
2983 static int
2984 pf_pfil_attach(void)
2985 {
2986 struct pfil_head *ph_inet;
2987 #ifdef INET6
2988 struct pfil_head *ph_inet6;
2989 #endif
2990 int error;
2991 int i;
2992
2993 if (pf_pfil_attached)
2994 return (0);
2995
2996 error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
2997 if (error)
2998 goto bad1;
2999 error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3000 if (error)
3001 goto bad2;
3002
3003 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3004 if (ph_inet)
3005 error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3006 PFIL_IN|PFIL_OUT, ph_inet);
3007 else
3008 error = ENOENT;
3009 if (error)
3010 goto bad3;
3011
3012 #ifdef INET6
3013 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3014 if (ph_inet6)
3015 error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3016 PFIL_IN|PFIL_OUT, ph_inet6);
3017 else
3018 error = ENOENT;
3019 if (error)
3020 goto bad4;
3021 #endif
3022
3023 for (i = 0; i < if_indexlim; i++)
3024 if (ifindex2ifnet[i])
3025 pfi_attach_ifnet(ifindex2ifnet[i]);
3026 pf_pfil_attached = 1;
3027
3028 return (0);
3029
3030 #ifdef INET6
3031 bad4:
3032 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3033 #endif
3034 bad3:
3035 pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3036 bad2:
3037 pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3038 bad1:
3039 return (error);
3040 }
3041
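/*
 * pf_pfil_detach() is the mirror image: detach the interfaces that were
 * attached, then remove every hook registered above.
 */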
3042 static int
3043 pf_pfil_detach(void)
3044 {
3045 struct pfil_head *ph_inet;
3046 #ifdef INET6
3047 struct pfil_head *ph_inet6;
3048 #endif
3049 int i;
3050
3051 if (pf_pfil_attached == 0)
3052 return (0);
3053
3054 for (i = 0; i < if_indexlim; i++)
3055 if (pfi_index2kif[i])
3056 pfi_detach_ifnet(ifindex2ifnet[i]);
3057
3058 pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3059 pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3060
3061 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3062 if (ph_inet)
3063 pfil_remove_hook((void *)pfil4_wrapper, NULL,
3064 PFIL_IN|PFIL_OUT, ph_inet);
3065 #ifdef INET6
3066 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3067 if (ph_inet6)
3068 pfil_remove_hook((void *)pfil6_wrapper, NULL,
3069 PFIL_IN|PFIL_OUT, ph_inet6);
3070 #endif
3071 pf_pfil_attached = 0;
3072
3073 return (0);
3074 }
3075 #endif
3076