pf_ioctl.c revision 1.21.12.1 1 /* $NetBSD: pf_ioctl.c,v 1.21.12.1 2006/03/18 14:07:52 peter Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define NPFSYNC 0
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99
100 void pfattach(int);
101 #ifdef _LKM
102 void pfdetach(void);
103 #endif
104 int pfopen(dev_t, int, int, struct lwp *);
105 int pfclose(dev_t, int, int, struct lwp *);
106 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
107 u_int8_t, u_int8_t, u_int8_t);
108 int pf_get_ruleset_number(u_int8_t);
109 void pf_init_ruleset(struct pf_ruleset *);
110 int pf_anchor_setup(struct pf_rule *,
111 const struct pf_ruleset *, const char *);
112 int pf_anchor_copyout(const struct pf_ruleset *,
113 const struct pf_rule *, struct pfioc_rule *);
114 void pf_anchor_remove(struct pf_rule *);
115
116 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
117 void pf_empty_pool(struct pf_palist *);
118 int pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
119 #ifdef ALTQ
120 int pf_begin_altq(u_int32_t *);
121 int pf_rollback_altq(u_int32_t);
122 int pf_commit_altq(u_int32_t);
123 int pf_enable_altq(struct pf_altq *);
124 int pf_disable_altq(struct pf_altq *);
125 #endif /* ALTQ */
126 int pf_begin_rules(u_int32_t *, int, const char *);
127 int pf_rollback_rules(u_int32_t, int, char *);
128 int pf_commit_rules(u_int32_t, int, char *);
129
/* NetBSD character-device switch for /dev/pf, plus pfil(9) hook state. */
130 #ifdef __NetBSD__
131 const struct cdevsw pf_cdevsw = {
132 pfopen, pfclose, noread, nowrite, pfioctl,
133 nostop, notty, nopoll, nommap, nokqfilter,
134 };
135
136 static int pf_pfil_attach(void);
137 static int pf_pfil_detach(void);
138
/* Nonzero while pf's pfil(9) hooks are installed (see pf_pfil_attach). */
139 static int pf_pfil_attached = 0;
140 #endif
141
/* Timer driving the periodic purge of expired states (armed in pfattach). */
142 #ifdef __OpenBSD__
143 extern struct timeout pf_expire_to;
144 #else
145 extern struct callout pf_expire_to;
146 #endif
147
/* Catch-all pass rule used when no configured rule matches (set up in pfattach). */
148 struct pf_rule pf_default_rule;
149 #ifdef ALTQ
/* Nonzero while ALTQ disciplines are meant to be enabled (see pf_commit_altq). */
150 static int pf_altq_running;
151 #endif
152
/* Highest tag/queue id that tagname2tag() will hand out. */
153 #define TAGID_MAX 50000
/* Global name<->id maps: packet tags, and (with ALTQ) queue ids. */
154 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
155 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
156
/* pf_qids reuses the tag allocator, so the two name sizes must agree. */
157 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
158 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
159 #endif
160 static u_int16_t tagname2tag(struct pf_tags *, char *);
161 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
162 static void tag_unref(struct pf_tags *, u_int16_t);
163 int pf_rtlabel_add(struct pf_addr_wrap *);
164 void pf_rtlabel_remove(struct pf_addr_wrap *);
165 void pf_rtlabel_copyout(struct pf_addr_wrap *);
166
/* Debug printf gated on the currently configured pf debug level. */
167 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
168
169 #ifdef __NetBSD__
170 extern struct pfil_head if_pfil;
171 #endif
172
/*
 * pfattach: one-time initialization of the pf subsystem at attach time.
 * Sets up the memory pools, the table/interface/OS-fingerprint
 * subsystems, the default pass rule, the default state timeouts and the
 * periodic expiry timer.  The order matters: pools must exist before
 * pfr/pfi/osfp initialization, and the timeout table must be filled in
 * before the purge timer is armed.
 */
173 void
174 pfattach(int num)
175 {
176 u_int32_t *timeout = pf_default_rule.timeout;
177
178 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
179 &pool_allocator_nointr);
180 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
181 "pfsrctrpl", NULL);
182 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
183 NULL);
184 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
185 &pool_allocator_nointr);
186 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
187 "pfpooladdrpl", &pool_allocator_nointr);
188 pfr_initialize();
189 pfi_initialize();
190 pf_osfp_initialize();
191
/* Cap the number of states at the compiled-in limit. */
192 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
193 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
194
195 RB_INIT(&tree_src_tracking);
196 RB_INIT(&pf_anchors);
197 pf_init_ruleset(&pf_main_ruleset);
198 TAILQ_INIT(&pf_altqs[0]);
199 TAILQ_INIT(&pf_altqs[1]);
200 TAILQ_INIT(&pf_pabuf);
201 pf_altqs_active = &pf_altqs[0];
202 pf_altqs_inactive = &pf_altqs[1];
203 TAILQ_INIT(&state_updates);
204
205 /* default rule should never be garbage collected */
206 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
207 pf_default_rule.action = PF_PASS;
208 pf_default_rule.nr = -1;
209
210 /* initialize default timeouts */
211 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
212 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
213 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
214 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
215 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
216 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
217 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
218 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
219 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
220 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
221 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
222 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
223 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
224 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
225 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
226 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
227 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
228 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
229
/* Arm the periodic purge of expired states/src nodes/fragments. */
230 #ifdef __OpenBSD__
231 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
232 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
233 #else
234 callout_init(&pf_expire_to);
235 callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
236 pf_purge_timeout, &pf_expire_to);
237 #endif
238
239 pf_normalize_init();
240 bzero(&pf_status, sizeof(pf_status));
241 pf_status.debug = PF_DEBUG_URGENT;
242
243 /* XXX do our best to avoid a conflict */
244 pf_status.hostid = arc4random();
245 }
246
247 #ifdef _LKM
/*
 * pfdetach: undo everything pfattach() and subsequent ioctls set up so
 * the LKM can be unloaded: stop the purge timer, flush rules, states,
 * source nodes, tables and anchors, then destroy the pools and the
 * sub-subsystems.  Teardown order mirrors the dependencies, so do not
 * reorder casually.
 */
248 void
249 pfdetach(void)
250 {
251 struct pf_anchor *anchor;
252 struct pf_state *state;
253 struct pf_src_node *node;
254 struct pfioc_table pt;
255 u_int32_t ticket;
256 int i;
257 char r = '\0';
258
259 (void)pf_pfil_detach();
260
261 callout_stop(&pf_expire_to);
262 pf_status.running = 0;
263
/* Empty main-ruleset rules via an empty begin/commit transaction. */
264 /* clear the rulesets */
265 for (i = 0; i < PF_RULESET_MAX; i++)
266 if (pf_begin_rules(&ticket, i, &r) == 0)
267 pf_commit_rules(ticket, i, &r);
268 #ifdef ALTQ
269 if (pf_begin_altq(&ticket) == 0)
270 pf_commit_altq(ticket);
271 #endif
272
/* Mark every state for immediate purge, then reap them. */
273 /* clear states */
274 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
275 state->timeout = PFTM_PURGE;
276 #if NPFSYNC
277 state->sync_flags = PFSTATE_NOSYNC;
278 #endif
279 }
280 pf_purge_expired_states();
281 #if NPFSYNC
282 pfsync_clear_states(pf_status.hostid, NULL);
283 #endif
284
/* Drop state->src-node links so the nodes below can be expired. */
285 /* clear source nodes */
286 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
287 state->src_node = NULL;
288 state->nat_src_node = NULL;
289 }
290 RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
291 node->expire = 1;
292 node->states = 0;
293 }
294 pf_purge_expired_src_nodes();
295
296 /* clear tables */
297 memset(&pt, '\0', sizeof(pt));
298 pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);
299
/* Committing an empty transaction per anchor frees empty anchors. */
300 /* destroy anchors */
301 while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
302 for (i = 0; i < PF_RULESET_MAX; i++)
303 if (pf_begin_rules(&ticket, i, anchor->name) == 0)
304 pf_commit_rules(ticket, i, anchor->name);
305 }
306
307 /* destroy main ruleset */
308 pf_remove_if_empty_ruleset(&pf_main_ruleset);
309
310 /* destroy the pools */
311 pool_destroy(&pf_pooladdr_pl);
312 pool_destroy(&pf_altq_pl);
313 pool_destroy(&pf_state_pl);
314 pool_destroy(&pf_rule_pl);
315 pool_destroy(&pf_src_tree_pl);
316
317 /* destroy subsystems */
318 pf_normalize_destroy();
319 pf_osfp_destroy();
320 pfr_destroy();
321 pfi_destroy();
322 }
323 #endif
324
/*
 * Open the pf pseudo-device.  Only minor number 0 exists; any other
 * minor is rejected with ENXIO.  flags/fmt/l are unused.
 */
int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	return (minor(dev) > 0 ? ENXIO : 0);
}
332
/*
 * Close the pf pseudo-device.  Mirrors pfopen(): only minor 0 is
 * valid; nothing to release on close.
 */
int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	return (minor(dev) > 0 ? ENXIO : 0);
}
340
341 struct pf_pool *
342 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
343 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
344 u_int8_t check_ticket)
345 {
346 struct pf_ruleset *ruleset;
347 struct pf_rule *rule;
348 int rs_num;
349
350 ruleset = pf_find_ruleset(anchor);
351 if (ruleset == NULL)
352 return (NULL);
353 rs_num = pf_get_ruleset_number(rule_action);
354 if (rs_num >= PF_RULESET_MAX)
355 return (NULL);
356 if (active) {
357 if (check_ticket && ticket !=
358 ruleset->rules[rs_num].active.ticket)
359 return (NULL);
360 if (r_last)
361 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
362 pf_rulequeue);
363 else
364 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
365 } else {
366 if (check_ticket && ticket !=
367 ruleset->rules[rs_num].inactive.ticket)
368 return (NULL);
369 if (r_last)
370 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
371 pf_rulequeue);
372 else
373 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
374 }
375 if (!r_last) {
376 while ((rule != NULL) && (rule->nr != rule_number))
377 rule = TAILQ_NEXT(rule, entries);
378 }
379 if (rule == NULL)
380 return (NULL);
381
382 return (&rule->rpool);
383 }
384
385 int
386 pf_get_ruleset_number(u_int8_t action)
387 {
388 switch (action) {
389 case PF_SCRUB:
390 case PF_NOSCRUB:
391 return (PF_RULESET_SCRUB);
392 break;
393 case PF_PASS:
394 case PF_DROP:
395 return (PF_RULESET_FILTER);
396 break;
397 case PF_NAT:
398 case PF_NONAT:
399 return (PF_RULESET_NAT);
400 break;
401 case PF_BINAT:
402 case PF_NOBINAT:
403 return (PF_RULESET_BINAT);
404 break;
405 case PF_RDR:
406 case PF_NORDR:
407 return (PF_RULESET_RDR);
408 break;
409 default:
410 return (PF_RULESET_MAX);
411 break;
412 }
413 }
414
415 void
416 pf_init_ruleset(struct pf_ruleset *ruleset)
417 {
418 int i;
419
420 memset(ruleset, 0, sizeof(struct pf_ruleset));
421 for (i = 0; i < PF_RULESET_MAX; i++) {
422 TAILQ_INIT(&ruleset->rules[i].queues[0]);
423 TAILQ_INIT(&ruleset->rules[i].queues[1]);
424 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
425 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
426 }
427 }
428
/*
 * Look up an anchor by its full path in the global anchor tree.
 *
 * NOTE(review): "key" is static, presumably to keep the large
 * struct pf_anchor off the kernel stack; this makes the function
 * non-reentrant — looks like all callers run serialized at
 * splsoftnet, confirm before calling from elsewhere.
 */
429 struct pf_anchor *
430 pf_find_anchor(const char *path)
431 {
432 static struct pf_anchor key;
433
434 memset(&key, 0, sizeof(key));
435 strlcpy(key.path, path, sizeof(key.path));
436 return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
437 }
438
439 struct pf_ruleset *
440 pf_find_ruleset(const char *path)
441 {
442 struct pf_anchor *anchor;
443
444 while (*path == '/')
445 path++;
446 if (!*path)
447 return (&pf_main_ruleset);
448 anchor = pf_find_anchor(path);
449 if (anchor == NULL)
450 return (NULL);
451 else
452 return (&anchor->ruleset);
453 }
454
/*
 * Resolve an anchor path to its ruleset, creating any missing anchors
 * along the way.  First the deepest existing ancestor is located, then
 * one anchor per remaining path component is allocated, linked into
 * both the global tree and its parent's child tree, and initialized.
 * Returns NULL on bad input, over-long components or allocation
 * failure.
 *
 * NOTE(review): buffer "p" is static (keeps MAXPATHLEN off the kernel
 * stack), so this is non-reentrant like pf_find_anchor().
 */
455 struct pf_ruleset *
456 pf_find_or_create_ruleset(const char *path)
457 {
458 static char p[MAXPATHLEN];
459 char *q = NULL /* XXX gcc */, *r;
460 struct pf_ruleset *ruleset;
461 struct pf_anchor *anchor = NULL /* XXX gcc */,
462 *dup, *parent = NULL;
463
464 while (*path == '/')
465 path++;
466 ruleset = pf_find_ruleset(path);
467 if (ruleset != NULL)
468 return (ruleset);
/* Walk the path backwards to find the deepest existing ancestor. */
469 strlcpy(p, path, sizeof(p));
470 while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
471 *q = 0;
472 if ((ruleset = pf_find_ruleset(p)) != NULL) {
473 parent = ruleset->anchor;
474 break;
475 }
476 }
/* q now points at the first component that must be created. */
477 if (q == NULL)
478 q = p;
479 else
480 q++;
481 strlcpy(p, path, sizeof(p));
482 if (!*q)
483 return (NULL);
/* Create one anchor per remaining path component. */
484 while ((r = strchr(q, '/')) != NULL || *q) {
485 if (r != NULL)
486 *r = 0;
487 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
488 (parent != NULL && strlen(parent->path) >=
489 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
490 return (NULL);
491 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
492 M_NOWAIT);
493 if (anchor == NULL)
494 return (NULL);
495 memset(anchor, 0, sizeof(*anchor));
496 RB_INIT(&anchor->children);
497 strlcpy(anchor->name, q, sizeof(anchor->name));
498 if (parent != NULL) {
499 strlcpy(anchor->path, parent->path,
500 sizeof(anchor->path));
501 strlcat(anchor->path, "/", sizeof(anchor->path));
502 }
503 strlcat(anchor->path, anchor->name, sizeof(anchor->path));
/* Link into the global tree; a duplicate here is a bug. */
504 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
505 NULL) {
506 printf("pf_find_or_create_ruleset: RB_INSERT1 "
507 "'%s' '%s' collides with '%s' '%s'\n",
508 anchor->path, anchor->name, dup->path, dup->name);
509 free(anchor, M_TEMP);
510 return (NULL);
511 }
512 if (parent != NULL) {
513 anchor->parent = parent;
/* Also link into the parent's child tree; unwind on collision. */
514 if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
515 anchor)) != NULL) {
516 printf("pf_find_or_create_ruleset: "
517 "RB_INSERT2 '%s' '%s' collides with "
518 "'%s' '%s'\n", anchor->path, anchor->name,
519 dup->path, dup->name);
520 RB_REMOVE(pf_anchor_global, &pf_anchors,
521 anchor);
522 free(anchor, M_TEMP);
523 return (NULL);
524 }
525 }
526 pf_init_ruleset(&anchor->ruleset);
527 anchor->ruleset.anchor = anchor;
528 parent = anchor;
/* Advance to the next component, or terminate the loop. */
529 if (r != NULL)
530 q = r + 1;
531 else
532 *q = 0;
533 }
534 return (&anchor->ruleset);
535 }
536
/*
 * Garbage-collect an anchor's ruleset if it is completely unused (no
 * child anchors, no rule references, no tables, no open transactions,
 * all rule queues empty), then walk up and try the same on each parent,
 * since removing a child may have made the parent collectible.  The
 * main ruleset is never removed.
 */
537 void
538 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
539 {
540 struct pf_anchor *parent;
541 int i;
542
543 while (ruleset != NULL) {
544 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
545 !RB_EMPTY(&ruleset->anchor->children) ||
546 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
547 ruleset->topen)
548 return;
549 for (i = 0; i < PF_RULESET_MAX; ++i)
550 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
551 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
552 ruleset->rules[i].inactive.open)
553 return;
/* Unlink from both trees before freeing the anchor. */
554 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
555 if ((parent = ruleset->anchor->parent) != NULL)
556 RB_REMOVE(pf_anchor_node, &parent->children,
557 ruleset->anchor);
558 free(ruleset->anchor, M_TEMP);
559 if (parent == NULL)
560 return;
561 ruleset = &parent->ruleset;
562 }
563 }
564
/*
 * Attach rule r to the anchor named by "name", interpreted relative to
 * ruleset s: an absolute path starts with '/', otherwise the path is
 * resolved against s's anchor with "../" components walking upwards
 * (counted in r->anchor_relative for copyout).  A trailing "/*" sets
 * anchor_wildcard.  The target ruleset is created on demand and its
 * anchor's refcount bumped.  Returns 0 on success, 1 on error.
 *
 * NOTE(review): "p" and "path" are static — same non-reentrancy
 * caveat as pf_find_anchor()/pf_find_or_create_ruleset().
 */
565 int
566 pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
567 const char *name)
568 {
569 static char *p, path[MAXPATHLEN];
570 struct pf_ruleset *ruleset;
571
572 r->anchor = NULL;
573 r->anchor_relative = 0;
574 r->anchor_wildcard = 0;
575 if (!name[0])
576 return (0);
577 if (name[0] == '/')
578 strlcpy(path, name + 1, sizeof(path));
579 else {
580 /* relative path */
581 r->anchor_relative = 1;
582 if (s->anchor == NULL || !s->anchor->path[0])
583 path[0] = 0;
584 else
585 strlcpy(path, s->anchor->path, sizeof(path));
/* Each leading "../" strips one component off the base path. */
586 while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
587 if (!path[0]) {
588 printf("pf_anchor_setup: .. beyond root\n");
589 return (1);
590 }
591 if ((p = strrchr(path, '/')) != NULL)
592 *p = 0;
593 else
594 path[0] = 0;
595 r->anchor_relative++;
596 name += 3;
597 }
598 if (path[0])
599 strlcat(path, "/", sizeof(path));
600 strlcat(path, name, sizeof(path));
601 }
/* A trailing "/*" means "evaluate all children", not a literal name. */
602 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
603 r->anchor_wildcard = 1;
604 *p = 0;
605 }
606 ruleset = pf_find_or_create_ruleset(path);
607 if (ruleset == NULL || ruleset->anchor == NULL) {
608 printf("pf_anchor_setup: ruleset\n");
609 return (1);
610 }
611 r->anchor = ruleset->anchor;
612 r->anchor->refcnt++;
613 return (0);
614 }
615
/*
 * Reconstruct the user-visible anchor call string for rule r (the
 * inverse of pf_anchor_setup()) into pr->anchor_call: absolute paths
 * get a leading '/', relative ones are rebuilt as "../" repeated
 * anchor_relative-1 times followed by the path remainder below the
 * common ancestor, and a wildcard anchor gets "/*" (or "*" at the
 * top) appended.  Returns 0 on success, 1 on inconsistency.
 */
616 int
617 pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
618 struct pfioc_rule *pr)
619 {
620 pr->anchor_call[0] = 0;
621 if (r->anchor == NULL)
622 return (0);
623 if (!r->anchor_relative) {
624 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
625 strlcat(pr->anchor_call, r->anchor->path,
626 sizeof(pr->anchor_call));
627 } else {
628 char a[MAXPATHLEN], b[MAXPATHLEN], *p;
629 int i;
630
/* a = caller ruleset's path, b = target anchor's path. */
631 if (rs->anchor == NULL)
632 a[0] = 0;
633 else
634 strlcpy(a, rs->anchor->path, sizeof(a));
635 strlcpy(b, r->anchor->path, sizeof(b));
/* Strip one component from a per "../" level, emitting "../". */
636 for (i = 1; i < r->anchor_relative; ++i) {
637 if ((p = strrchr(a, '/')) == NULL)
638 p = a;
639 *p = 0;
640 strlcat(pr->anchor_call, "../",
641 sizeof(pr->anchor_call));
642 }
/* After stripping, a must be a prefix of b or the rule is corrupt. */
643 if (strncmp(a, b, strlen(a))) {
644 printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
645 return (1);
646 }
647 if (strlen(b) > strlen(a))
648 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
649 sizeof(pr->anchor_call));
650 }
651 if (r->anchor_wildcard)
652 strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
653 sizeof(pr->anchor_call));
654 return (0);
655 }
656
657 void
658 pf_anchor_remove(struct pf_rule *r)
659 {
660 if (r->anchor == NULL)
661 return;
662 if (r->anchor->refcnt <= 0) {
663 printf("pf_anchor_remove: broken refcount");
664 r->anchor = NULL;
665 return;
666 }
667 if (!--r->anchor->refcnt)
668 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
669 r->anchor = NULL;
670 }
671
672 void
673 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
674 {
675 struct pf_pooladdr *mv_pool_pa;
676
677 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
678 TAILQ_REMOVE(poola, mv_pool_pa, entries);
679 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
680 }
681 }
682
683 void
684 pf_empty_pool(struct pf_palist *poola)
685 {
686 struct pf_pooladdr *empty_pool_pa;
687
688 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
689 pfi_dynaddr_remove(&empty_pool_pa->addr);
690 pf_tbladdr_remove(&empty_pool_pa->addr);
691 pfi_detach_rule(empty_pool_pa->kif);
692 TAILQ_REMOVE(poola, empty_pool_pa, entries);
693 pool_put(&pf_pooladdr_pl, empty_pool_pa);
694 }
695 }
696
/*
 * Unlink rule "rule" from "rulequeue" (if non-NULL) and free it once
 * nothing references it any more.  A rule still referenced by states
 * or source nodes is only unlinked here; the actual free then happens
 * on a later call (with rulequeue == NULL) from the purge path, which
 * is why the table detaching appears twice.
 */
697 void
698 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
699 {
700 if (rulequeue != NULL) {
701 if (rule->states <= 0) {
702 /*
703 * XXX - we need to remove the table *before* detaching
704 * the rule to make sure the table code does not delete
705 * the anchor under our feet.
706 */
707 pf_tbladdr_remove(&rule->src.addr);
708 pf_tbladdr_remove(&rule->dst.addr);
709 if (rule->overload_tbl)
710 pfr_detach_table(rule->overload_tbl);
711 }
712 TAILQ_REMOVE(rulequeue, rule, entries);
/* tqe_prev == NULL marks the rule as unlinked for the check below. */
713 rule->entries.tqe_prev = NULL;
714 rule->nr = -1;
715 }
716
/* Still referenced (or not yet unlinked): defer the actual free. */
717 if (rule->states > 0 || rule->src_nodes > 0 ||
718 rule->entries.tqe_prev != NULL)
719 return;
720 pf_tag_unref(rule->tag);
721 pf_tag_unref(rule->match_tag);
722 #ifdef ALTQ
/* pqid == qid means both fields share one reference. */
723 if (rule->pqid != rule->qid)
724 pf_qid_unref(rule->pqid);
725 pf_qid_unref(rule->qid);
726 #endif
727 pf_rtlabel_remove(&rule->src.addr);
728 pf_rtlabel_remove(&rule->dst.addr);
729 pfi_dynaddr_remove(&rule->src.addr);
730 pfi_dynaddr_remove(&rule->dst.addr);
/* Deferred-free path: the tables were not detached above. */
731 if (rulequeue == NULL) {
732 pf_tbladdr_remove(&rule->src.addr);
733 pf_tbladdr_remove(&rule->dst.addr);
734 if (rule->overload_tbl)
735 pfr_detach_table(rule->overload_tbl);
736 }
737 pfi_detach_rule(rule->kif);
738 pf_anchor_remove(rule);
739 pf_empty_pool(&rule->rpool.list);
740 pool_put(&pf_rule_pl, rule);
741 }
742
/*
 * Return the tag id for "tagname", allocating a new id if the name is
 * unknown.  The list is kept sorted by tag id; the free-slot scan
 * below relies on that invariant.  Returns 0 on failure (id space
 * exhausted or out of memory); id 0 is never a valid tag.
 */
743 static u_int16_t
744 tagname2tag(struct pf_tags *head, char *tagname)
745 {
746 struct pf_tagname *tag, *p = NULL;
747 u_int16_t new_tagid = 1;
748
/* Existing name: just bump the reference count. */
749 TAILQ_FOREACH(tag, head, entries)
750 if (strcmp(tagname, tag->name) == 0) {
751 tag->ref++;
752 return (tag->tag);
753 }
754
755 /*
756 * to avoid fragmentation, we do a linear search from the beginning
757 * and take the first free slot we find. if there is none or the list
758 * is empty, append a new entry at the end.
759 */
760
761 /* new entry */
762 if (!TAILQ_EMPTY(head))
/* Walk while ids are consecutive; stops at the first gap (p) or end (p == NULL). */
763 for (p = TAILQ_FIRST(head); p != NULL &&
764 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
765 new_tagid = p->tag + 1;
766
767 if (new_tagid > TAGID_MAX)
768 return (0);
769
770 /* allocate and fill new struct pf_tagname */
771 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
772 M_TEMP, M_NOWAIT);
773 if (tag == NULL)
774 return (0);
775 bzero(tag, sizeof(struct pf_tagname));
776 strlcpy(tag->name, tagname, sizeof(tag->name));
777 tag->tag = new_tagid;
778 tag->ref++;
779
/* Insert so the list stays sorted by tag id. */
780 if (p != NULL) /* insert new entry before p */
781 TAILQ_INSERT_BEFORE(p, tag, entries);
782 else /* either list empty or no free slot in between */
783 TAILQ_INSERT_TAIL(head, tag, entries);
784
785 return (tag->tag);
786 }
787
788 static void
789 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
790 {
791 struct pf_tagname *tag;
792
793 TAILQ_FOREACH(tag, head, entries)
794 if (tag->tag == tagid) {
795 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
796 return;
797 }
798 }
799
800 static void
801 tag_unref(struct pf_tags *head, u_int16_t tag)
802 {
803 struct pf_tagname *p, *next;
804
805 if (tag == 0)
806 return;
807
808 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
809 next = TAILQ_NEXT(p, entries);
810 if (tag == p->tag) {
811 if (--p->ref == 0) {
812 TAILQ_REMOVE(head, p, entries);
813 free(p, M_TEMP);
814 }
815 break;
816 }
817 }
818 }
819
820 u_int16_t
821 pf_tagname2tag(char *tagname)
822 {
823 return (tagname2tag(&pf_tags, tagname));
824 }
825
826 void
827 pf_tag2tagname(u_int16_t tagid, char *p)
828 {
829 return (tag2tagname(&pf_tags, tagid, p));
830 }
831
832 void
833 pf_tag_ref(u_int16_t tag)
834 {
835 struct pf_tagname *t;
836
837 TAILQ_FOREACH(t, &pf_tags, entries)
838 if (t->tag == tag)
839 break;
840 if (t != NULL)
841 t->ref++;
842 }
843
844 void
845 pf_tag_unref(u_int16_t tag)
846 {
847 return (tag_unref(&pf_tags, tag));
848 }
849
850 int
851 pf_rtlabel_add(struct pf_addr_wrap *a)
852 {
853 #ifdef __OpenBSD__
854 if (a->type == PF_ADDR_RTLABEL &&
855 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
856 return (-1);
857 #endif
858 return (0);
859 }
860
861 void
862 pf_rtlabel_remove(struct pf_addr_wrap *a)
863 {
864 #ifdef __OpenBSD__
865 if (a->type == PF_ADDR_RTLABEL)
866 rtlabel_unref(a->v.rtlabel);
867 #endif
868 }
869
870 void
871 pf_rtlabel_copyout(struct pf_addr_wrap *a)
872 {
873 #ifdef __OpenBSD__
874 const char *name;
875
876 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
877 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
878 strlcpy(a->v.rtlabelname, "?",
879 sizeof(a->v.rtlabelname));
880 else
881 strlcpy(a->v.rtlabelname, name,
882 sizeof(a->v.rtlabelname));
883 }
884 #endif
885 }
886
887 #ifdef ALTQ
888 u_int32_t
889 pf_qname2qid(char *qname)
890 {
891 return ((u_int32_t)tagname2tag(&pf_qids, qname));
892 }
893
894 void
895 pf_qid2qname(u_int32_t qid, char *p)
896 {
897 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
898 }
899
900 void
901 pf_qid_unref(u_int32_t qid)
902 {
903 return (tag_unref(&pf_qids, (u_int16_t)qid));
904 }
905
906 int
907 pf_begin_altq(u_int32_t *ticket)
908 {
909 struct pf_altq *altq;
910 int error = 0;
911
912 /* Purge the old altq list */
913 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
914 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
915 if (altq->qname[0] == 0) {
916 /* detach and destroy the discipline */
917 error = altq_remove(altq);
918 } else
919 pf_qid_unref(altq->qid);
920 pool_put(&pf_altq_pl, altq);
921 }
922 if (error)
923 return (error);
924 *ticket = ++ticket_altqs_inactive;
925 altqs_inactive_open = 1;
926 return (0);
927 }
928
929 int
930 pf_rollback_altq(u_int32_t ticket)
931 {
932 struct pf_altq *altq;
933 int error = 0;
934
935 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
936 return (0);
937 /* Purge the old altq list */
938 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
939 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
940 if (altq->qname[0] == 0) {
941 /* detach and destroy the discipline */
942 error = altq_remove(altq);
943 } else
944 pf_qid_unref(altq->qid);
945 pool_put(&pf_altq_pl, altq);
946 }
947 altqs_inactive_open = 0;
948 return (error);
949 }
950
/*
 * Commit an ALTQ transaction: atomically (at splsoftnet) swap the
 * active and inactive altq lists, attach/enable the new disciplines,
 * then tear down the old ones.  Returns EBUSY on a stale ticket; the
 * first error encountered otherwise.
 */
951 int
952 pf_commit_altq(u_int32_t ticket)
953 {
954 struct pf_altqqueue *old_altqs;
955 struct pf_altq *altq;
956 int s, err, error = 0;
957
958 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
959 return (EBUSY);
960
961 /* swap altqs, keep the old. */
962 s = splsoftnet();
963 old_altqs = pf_altqs_active;
964 pf_altqs_active = pf_altqs_inactive;
965 pf_altqs_inactive = old_altqs;
966 ticket_altqs_active = ticket_altqs_inactive;
967
968 /* Attach new disciplines */
969 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
/* Only root entries (empty qname) carry a discipline to attach. */
970 if (altq->qname[0] == 0) {
971 /* attach the discipline */
972 error = altq_pfattach(altq);
973 if (error == 0 && pf_altq_running)
974 error = pf_enable_altq(altq);
975 if (error != 0) {
976 splx(s);
977 return (error);
978 }
979 }
980 }
981
/* NOTE(review): on the error return above the old list is not purged
 * and altqs_inactive_open stays set — matches upstream behavior. */
982 /* Purge the old altq list */
983 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
984 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
985 if (altq->qname[0] == 0) {
986 /* detach and destroy the discipline */
987 if (pf_altq_running)
988 error = pf_disable_altq(altq);
/* Keep going on error; remember the first failure. */
989 err = altq_pfdetach(altq);
990 if (err != 0 && error == 0)
991 error = err;
992 err = altq_remove(altq);
993 if (err != 0 && error == 0)
994 error = err;
995 } else
996 pf_qid_unref(altq->qid);
997 pool_put(&pf_altq_pl, altq);
998 }
999 splx(s);
1000
1001 altqs_inactive_open = 0;
1002 return (error);
1003 }
1004
1005 int
1006 pf_enable_altq(struct pf_altq *altq)
1007 {
1008 struct ifnet *ifp;
1009 struct tb_profile tb;
1010 int s, error = 0;
1011
1012 if ((ifp = ifunit(altq->ifname)) == NULL)
1013 return (EINVAL);
1014
1015 if (ifp->if_snd.altq_type != ALTQT_NONE)
1016 error = altq_enable(&ifp->if_snd);
1017
1018 /* set tokenbucket regulator */
1019 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1020 tb.rate = altq->ifbandwidth;
1021 tb.depth = altq->tbrsize;
1022 #ifdef __NetBSD__
1023 s = splnet();
1024 #else
1025 s = splimp();
1026 #endif
1027 error = tbr_set(&ifp->if_snd, &tb);
1028 splx(s);
1029 }
1030
1031 return (error);
1032 }
1033
/*
 * Disable the queueing discipline on altq's interface and clear its
 * token-bucket regulator.  Returns EINVAL when the interface does not
 * exist, 0 if the discipline was already replaced, otherwise the
 * error from altq_disable()/tbr_set().
 */
1034 int
1035 pf_disable_altq(struct pf_altq *altq)
1036 {
1037 struct ifnet *ifp;
1038 struct tb_profile tb;
1039 int s, error;
1040
1041 if ((ifp = ifunit(altq->ifname)) == NULL)
1042 return (EINVAL);
1043
1044 /*
1045 * when the discipline is no longer referenced, it was overridden
1046 * by a new one. if so, just return.
1047 */
1048 if (altq->altq_disc != ifp->if_snd.altq_disc)
1049 return (0);
1050
1051 error = altq_disable(&ifp->if_snd);
1052
1053 if (error == 0) {
1054 /* clear tokenbucket regulator */
1055 tb.rate = 0;
1056 #ifdef __NetBSD__
1057 s = splnet();
1058 #else
1059 s = splimp();
1060 #endif
1061 error = tbr_set(&ifp->if_snd, &tb);
1062 splx(s);
1063 }
1064
1065 return (error);
1066 }
1067 #endif /* ALTQ */
1068
1069 int
1070 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1071 {
1072 struct pf_ruleset *rs;
1073 struct pf_rule *rule;
1074
1075 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1076 return (EINVAL);
1077 rs = pf_find_or_create_ruleset(anchor);
1078 if (rs == NULL)
1079 return (EINVAL);
1080 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1081 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1082 *ticket = ++rs->rules[rs_num].inactive.ticket;
1083 rs->rules[rs_num].inactive.open = 1;
1084 return (0);
1085 }
1086
1087 int
1088 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1089 {
1090 struct pf_ruleset *rs;
1091 struct pf_rule *rule;
1092
1093 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1094 return (EINVAL);
1095 rs = pf_find_ruleset(anchor);
1096 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1097 rs->rules[rs_num].inactive.ticket != ticket)
1098 return (0);
1099 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1100 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1101 rs->rules[rs_num].inactive.open = 0;
1102 return (0);
1103 }
1104
/*
 * Commit a rules transaction: atomically (at splsoftnet) swap the
 * active and inactive rule queues of the anchor/ruleset type,
 * recompute skip steps, purge the replaced rules and garbage-collect
 * the ruleset if it became empty.  Returns EBUSY on a stale ticket.
 */
1105 int
1106 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1107 {
1108 struct pf_ruleset *rs;
1109 struct pf_rule *rule;
1110 struct pf_rulequeue *old_rules;
1111 int s;
1112
1113 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1114 return (EINVAL);
1115 rs = pf_find_ruleset(anchor);
1116 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1117 ticket != rs->rules[rs_num].inactive.ticket)
1118 return (EBUSY);
1119
1120 /* Swap rules, keep the old. */
1121 s = splsoftnet();
1122 old_rules = rs->rules[rs_num].active.ptr;
1123 rs->rules[rs_num].active.ptr =
1124 rs->rules[rs_num].inactive.ptr;
1125 rs->rules[rs_num].inactive.ptr = old_rules;
1126 rs->rules[rs_num].active.ticket =
1127 rs->rules[rs_num].inactive.ticket;
1128 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1129
1130 /* Purge the old rule list. */
1131 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1132 pf_rm_rule(old_rules, rule);
1133 rs->rules[rs_num].inactive.open = 0;
/* May free "rs" itself if the anchor became fully unused. */
1134 pf_remove_if_empty_ruleset(rs);
1135 splx(s);
1136 return (0);
1137 }
1138
1139 int
1140 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1141 {
1142 struct pf_pooladdr *pa = NULL;
1143 struct pf_pool *pool = NULL;
1144 int s;
1145 int error = 0;
1146
1147 /* XXX keep in sync with switch() below */
1148 if (securelevel > 1)
1149 switch (cmd) {
1150 case DIOCGETRULES:
1151 case DIOCGETRULE:
1152 case DIOCGETADDRS:
1153 case DIOCGETADDR:
1154 case DIOCGETSTATE:
1155 case DIOCSETSTATUSIF:
1156 case DIOCGETSTATUS:
1157 case DIOCCLRSTATUS:
1158 case DIOCNATLOOK:
1159 case DIOCSETDEBUG:
1160 case DIOCGETSTATES:
1161 case DIOCGETTIMEOUT:
1162 case DIOCCLRRULECTRS:
1163 case DIOCGETLIMIT:
1164 case DIOCGETALTQS:
1165 case DIOCGETALTQ:
1166 case DIOCGETQSTATS:
1167 case DIOCGETRULESETS:
1168 case DIOCGETRULESET:
1169 case DIOCRGETTABLES:
1170 case DIOCRGETTSTATS:
1171 case DIOCRCLRTSTATS:
1172 case DIOCRCLRADDRS:
1173 case DIOCRADDADDRS:
1174 case DIOCRDELADDRS:
1175 case DIOCRSETADDRS:
1176 case DIOCRGETADDRS:
1177 case DIOCRGETASTATS:
1178 case DIOCRCLRASTATS:
1179 case DIOCRTSTADDRS:
1180 case DIOCOSFPGET:
1181 case DIOCGETSRCNODES:
1182 case DIOCCLRSRCNODES:
1183 case DIOCIGETIFACES:
1184 case DIOCICLRISTATS:
1185 case DIOCSETIFFLAG:
1186 case DIOCCLRIFFLAG:
1187 break;
1188 case DIOCRCLRTABLES:
1189 case DIOCRADDTABLES:
1190 case DIOCRDELTABLES:
1191 case DIOCRSETTFLAGS:
1192 if (((struct pfioc_table *)addr)->pfrio_flags &
1193 PFR_FLAG_DUMMY)
1194 break; /* dummy operation ok */
1195 return (EPERM);
1196 default:
1197 return (EPERM);
1198 }
1199
1200 if (!(flags & FWRITE))
1201 switch (cmd) {
1202 case DIOCGETRULES:
1203 case DIOCGETRULE:
1204 case DIOCGETADDRS:
1205 case DIOCGETADDR:
1206 case DIOCGETSTATE:
1207 case DIOCGETSTATUS:
1208 case DIOCGETSTATES:
1209 case DIOCGETTIMEOUT:
1210 case DIOCGETLIMIT:
1211 case DIOCGETALTQS:
1212 case DIOCGETALTQ:
1213 case DIOCGETQSTATS:
1214 case DIOCGETRULESETS:
1215 case DIOCGETRULESET:
1216 case DIOCRGETTABLES:
1217 case DIOCRGETTSTATS:
1218 case DIOCRGETADDRS:
1219 case DIOCRGETASTATS:
1220 case DIOCRTSTADDRS:
1221 case DIOCOSFPGET:
1222 case DIOCGETSRCNODES:
1223 case DIOCIGETIFACES:
1224 break;
1225 case DIOCRCLRTABLES:
1226 case DIOCRADDTABLES:
1227 case DIOCRDELTABLES:
1228 case DIOCRCLRTSTATS:
1229 case DIOCRCLRADDRS:
1230 case DIOCRADDADDRS:
1231 case DIOCRDELADDRS:
1232 case DIOCRSETADDRS:
1233 case DIOCRSETTFLAGS:
1234 if (((struct pfioc_table *)addr)->pfrio_flags &
1235 PFR_FLAG_DUMMY)
1236 break; /* dummy operation ok */
1237 return (EACCES);
1238 default:
1239 return (EACCES);
1240 }
1241
1242 s = splsoftnet();
1243 switch (cmd) {
1244
1245 case DIOCSTART:
1246 if (pf_status.running)
1247 error = EEXIST;
1248 else {
1249 #ifdef __NetBSD__
1250 error = pf_pfil_attach();
1251 if (error)
1252 break;
1253 #endif
1254 pf_status.running = 1;
1255 pf_status.since = time_second;
1256 if (pf_status.stateid == 0) {
1257 pf_status.stateid = time_second;
1258 pf_status.stateid = pf_status.stateid << 32;
1259 }
1260 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1261 }
1262 break;
1263
1264 case DIOCSTOP:
1265 if (!pf_status.running)
1266 error = ENOENT;
1267 else {
1268 #ifdef __NetBSD__
1269 error = pf_pfil_detach();
1270 if (error)
1271 break;
1272 #endif
1273 pf_status.running = 0;
1274 pf_status.since = time_second;
1275 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1276 }
1277 break;
1278
1279 case DIOCADDRULE: {
1280 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1281 struct pf_ruleset *ruleset;
1282 struct pf_rule *rule, *tail;
1283 struct pf_pooladdr *pa;
1284 int rs_num;
1285
1286 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1287 ruleset = pf_find_ruleset(pr->anchor);
1288 if (ruleset == NULL) {
1289 error = EINVAL;
1290 break;
1291 }
1292 rs_num = pf_get_ruleset_number(pr->rule.action);
1293 if (rs_num >= PF_RULESET_MAX) {
1294 error = EINVAL;
1295 break;
1296 }
1297 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1298 error = EINVAL;
1299 break;
1300 }
1301 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1302 error = EBUSY;
1303 break;
1304 }
1305 if (pr->pool_ticket != ticket_pabuf) {
1306 error = EBUSY;
1307 break;
1308 }
1309 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1310 if (rule == NULL) {
1311 error = ENOMEM;
1312 break;
1313 }
1314 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1315 rule->anchor = NULL;
1316 rule->kif = NULL;
1317 TAILQ_INIT(&rule->rpool.list);
1318 /* initialize refcounting */
1319 rule->states = 0;
1320 rule->src_nodes = 0;
1321 rule->entries.tqe_prev = NULL;
1322 #ifndef INET
1323 if (rule->af == AF_INET) {
1324 pool_put(&pf_rule_pl, rule);
1325 error = EAFNOSUPPORT;
1326 break;
1327 }
1328 #endif /* INET */
1329 #ifndef INET6
1330 if (rule->af == AF_INET6) {
1331 pool_put(&pf_rule_pl, rule);
1332 error = EAFNOSUPPORT;
1333 break;
1334 }
1335 #endif /* INET6 */
1336 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1337 pf_rulequeue);
1338 if (tail)
1339 rule->nr = tail->nr + 1;
1340 else
1341 rule->nr = 0;
1342 if (rule->ifname[0]) {
1343 rule->kif = pfi_attach_rule(rule->ifname);
1344 if (rule->kif == NULL) {
1345 pool_put(&pf_rule_pl, rule);
1346 error = EINVAL;
1347 break;
1348 }
1349 }
1350
1351 #ifdef ALTQ
1352 /* set queue IDs */
1353 if (rule->qname[0] != 0) {
1354 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1355 error = EBUSY;
1356 else if (rule->pqname[0] != 0) {
1357 if ((rule->pqid =
1358 pf_qname2qid(rule->pqname)) == 0)
1359 error = EBUSY;
1360 } else
1361 rule->pqid = rule->qid;
1362 }
1363 #endif
1364 if (rule->tagname[0])
1365 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1366 error = EBUSY;
1367 if (rule->match_tagname[0])
1368 if ((rule->match_tag =
1369 pf_tagname2tag(rule->match_tagname)) == 0)
1370 error = EBUSY;
1371 if (rule->rt && !rule->direction)
1372 error = EINVAL;
1373 if (pf_rtlabel_add(&rule->src.addr) ||
1374 pf_rtlabel_add(&rule->dst.addr))
1375 error = EBUSY;
1376 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1377 error = EINVAL;
1378 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1379 error = EINVAL;
1380 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1381 error = EINVAL;
1382 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1383 error = EINVAL;
1384 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1385 error = EINVAL;
1386 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1387 if (pf_tbladdr_setup(ruleset, &pa->addr))
1388 error = EINVAL;
1389
1390 if (rule->overload_tblname[0]) {
1391 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1392 rule->overload_tblname)) == NULL)
1393 error = EINVAL;
1394 else
1395 rule->overload_tbl->pfrkt_flags |=
1396 PFR_TFLAG_ACTIVE;
1397 }
1398
1399 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1400 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1401 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1402 (rule->rt > PF_FASTROUTE)) &&
1403 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1404 error = EINVAL;
1405
1406 if (error) {
1407 pf_rm_rule(NULL, rule);
1408 break;
1409 }
1410 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1411 rule->evaluations = rule->packets = rule->bytes = 0;
1412 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1413 rule, entries);
1414 break;
1415 }
1416
1417 case DIOCGETRULES: {
1418 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1419 struct pf_ruleset *ruleset;
1420 struct pf_rule *tail;
1421 int rs_num;
1422
1423 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1424 ruleset = pf_find_ruleset(pr->anchor);
1425 if (ruleset == NULL) {
1426 error = EINVAL;
1427 break;
1428 }
1429 rs_num = pf_get_ruleset_number(pr->rule.action);
1430 if (rs_num >= PF_RULESET_MAX) {
1431 error = EINVAL;
1432 break;
1433 }
1434 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1435 pf_rulequeue);
1436 if (tail)
1437 pr->nr = tail->nr + 1;
1438 else
1439 pr->nr = 0;
1440 pr->ticket = ruleset->rules[rs_num].active.ticket;
1441 break;
1442 }
1443
1444 case DIOCGETRULE: {
1445 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1446 struct pf_ruleset *ruleset;
1447 struct pf_rule *rule;
1448 int rs_num, i;
1449
1450 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1451 ruleset = pf_find_ruleset(pr->anchor);
1452 if (ruleset == NULL) {
1453 error = EINVAL;
1454 break;
1455 }
1456 rs_num = pf_get_ruleset_number(pr->rule.action);
1457 if (rs_num >= PF_RULESET_MAX) {
1458 error = EINVAL;
1459 break;
1460 }
1461 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1462 error = EBUSY;
1463 break;
1464 }
1465 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1466 while ((rule != NULL) && (rule->nr != pr->nr))
1467 rule = TAILQ_NEXT(rule, entries);
1468 if (rule == NULL) {
1469 error = EBUSY;
1470 break;
1471 }
1472 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1473 if (pf_anchor_copyout(ruleset, rule, pr)) {
1474 error = EBUSY;
1475 break;
1476 }
1477 pfi_dynaddr_copyout(&pr->rule.src.addr);
1478 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1479 pf_tbladdr_copyout(&pr->rule.src.addr);
1480 pf_tbladdr_copyout(&pr->rule.dst.addr);
1481 pf_rtlabel_copyout(&pr->rule.src.addr);
1482 pf_rtlabel_copyout(&pr->rule.dst.addr);
1483 for (i = 0; i < PF_SKIP_COUNT; ++i)
1484 if (rule->skip[i].ptr == NULL)
1485 pr->rule.skip[i].nr = -1;
1486 else
1487 pr->rule.skip[i].nr =
1488 rule->skip[i].ptr->nr;
1489 break;
1490 }
1491
1492 case DIOCCHANGERULE: {
1493 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1494 struct pf_ruleset *ruleset;
1495 struct pf_rule *oldrule = NULL, *newrule = NULL;
1496 u_int32_t nr = 0;
1497 int rs_num;
1498
1499 if (!(pcr->action == PF_CHANGE_REMOVE ||
1500 pcr->action == PF_CHANGE_GET_TICKET) &&
1501 pcr->pool_ticket != ticket_pabuf) {
1502 error = EBUSY;
1503 break;
1504 }
1505
1506 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1507 pcr->action > PF_CHANGE_GET_TICKET) {
1508 error = EINVAL;
1509 break;
1510 }
1511 ruleset = pf_find_ruleset(pcr->anchor);
1512 if (ruleset == NULL) {
1513 error = EINVAL;
1514 break;
1515 }
1516 rs_num = pf_get_ruleset_number(pcr->rule.action);
1517 if (rs_num >= PF_RULESET_MAX) {
1518 error = EINVAL;
1519 break;
1520 }
1521
1522 if (pcr->action == PF_CHANGE_GET_TICKET) {
1523 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1524 break;
1525 } else {
1526 if (pcr->ticket !=
1527 ruleset->rules[rs_num].active.ticket) {
1528 error = EINVAL;
1529 break;
1530 }
1531 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1532 error = EINVAL;
1533 break;
1534 }
1535 }
1536
1537 if (pcr->action != PF_CHANGE_REMOVE) {
1538 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1539 if (newrule == NULL) {
1540 error = ENOMEM;
1541 break;
1542 }
1543 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1544 TAILQ_INIT(&newrule->rpool.list);
1545 /* initialize refcounting */
1546 newrule->states = 0;
1547 newrule->entries.tqe_prev = NULL;
1548 #ifndef INET
1549 if (newrule->af == AF_INET) {
1550 pool_put(&pf_rule_pl, newrule);
1551 error = EAFNOSUPPORT;
1552 break;
1553 }
1554 #endif /* INET */
1555 #ifndef INET6
1556 if (newrule->af == AF_INET6) {
1557 pool_put(&pf_rule_pl, newrule);
1558 error = EAFNOSUPPORT;
1559 break;
1560 }
1561 #endif /* INET6 */
1562 if (newrule->ifname[0]) {
1563 newrule->kif = pfi_attach_rule(newrule->ifname);
1564 if (newrule->kif == NULL) {
1565 pool_put(&pf_rule_pl, newrule);
1566 error = EINVAL;
1567 break;
1568 }
1569 } else
1570 newrule->kif = NULL;
1571
1572 #ifdef ALTQ
1573 /* set queue IDs */
1574 if (newrule->qname[0] != 0) {
1575 if ((newrule->qid =
1576 pf_qname2qid(newrule->qname)) == 0)
1577 error = EBUSY;
1578 else if (newrule->pqname[0] != 0) {
1579 if ((newrule->pqid =
1580 pf_qname2qid(newrule->pqname)) == 0)
1581 error = EBUSY;
1582 } else
1583 newrule->pqid = newrule->qid;
1584 }
1585 #endif /* ALTQ */
1586 if (newrule->tagname[0])
1587 if ((newrule->tag =
1588 pf_tagname2tag(newrule->tagname)) == 0)
1589 error = EBUSY;
1590 if (newrule->match_tagname[0])
1591 if ((newrule->match_tag = pf_tagname2tag(
1592 newrule->match_tagname)) == 0)
1593 error = EBUSY;
1594 if (newrule->rt && !newrule->direction)
1595 error = EINVAL;
1596 if (pf_rtlabel_add(&newrule->src.addr) ||
1597 pf_rtlabel_add(&newrule->dst.addr))
1598 error = EBUSY;
1599 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1600 error = EINVAL;
1601 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1602 error = EINVAL;
1603 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1604 error = EINVAL;
1605 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1606 error = EINVAL;
1607 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1608 error = EINVAL;
1609
1610 if (newrule->overload_tblname[0]) {
1611 if ((newrule->overload_tbl = pfr_attach_table(
1612 ruleset, newrule->overload_tblname)) ==
1613 NULL)
1614 error = EINVAL;
1615 else
1616 newrule->overload_tbl->pfrkt_flags |=
1617 PFR_TFLAG_ACTIVE;
1618 }
1619
1620 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1621 if (((((newrule->action == PF_NAT) ||
1622 (newrule->action == PF_RDR) ||
1623 (newrule->action == PF_BINAT) ||
1624 (newrule->rt > PF_FASTROUTE)) &&
1625 !pcr->anchor[0])) &&
1626 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1627 error = EINVAL;
1628
1629 if (error) {
1630 pf_rm_rule(NULL, newrule);
1631 break;
1632 }
1633 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1634 newrule->evaluations = newrule->packets = 0;
1635 newrule->bytes = 0;
1636 }
1637 pf_empty_pool(&pf_pabuf);
1638
1639 if (pcr->action == PF_CHANGE_ADD_HEAD)
1640 oldrule = TAILQ_FIRST(
1641 ruleset->rules[rs_num].active.ptr);
1642 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1643 oldrule = TAILQ_LAST(
1644 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1645 else {
1646 oldrule = TAILQ_FIRST(
1647 ruleset->rules[rs_num].active.ptr);
1648 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1649 oldrule = TAILQ_NEXT(oldrule, entries);
1650 if (oldrule == NULL) {
1651 if (newrule != NULL)
1652 pf_rm_rule(NULL, newrule);
1653 error = EINVAL;
1654 break;
1655 }
1656 }
1657
1658 if (pcr->action == PF_CHANGE_REMOVE)
1659 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1660 else {
1661 if (oldrule == NULL)
1662 TAILQ_INSERT_TAIL(
1663 ruleset->rules[rs_num].active.ptr,
1664 newrule, entries);
1665 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1666 pcr->action == PF_CHANGE_ADD_BEFORE)
1667 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1668 else
1669 TAILQ_INSERT_AFTER(
1670 ruleset->rules[rs_num].active.ptr,
1671 oldrule, newrule, entries);
1672 }
1673
1674 nr = 0;
1675 TAILQ_FOREACH(oldrule,
1676 ruleset->rules[rs_num].active.ptr, entries)
1677 oldrule->nr = nr++;
1678
1679 ruleset->rules[rs_num].active.ticket++;
1680
1681 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1682 pf_remove_if_empty_ruleset(ruleset);
1683
1684 break;
1685 }
1686
1687 case DIOCCLRSTATES: {
1688 struct pf_state *state;
1689 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1690 int killed = 0;
1691
1692 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1693 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1694 state->u.s.kif->pfik_name)) {
1695 state->timeout = PFTM_PURGE;
1696 #if NPFSYNC
1697 /* don't send out individual delete messages */
1698 state->sync_flags = PFSTATE_NOSYNC;
1699 #endif
1700 killed++;
1701 }
1702 }
1703 pf_purge_expired_states();
1704 pf_status.states = 0;
1705 psk->psk_af = killed;
1706 #if NPFSYNC
1707 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1708 #endif
1709 break;
1710 }
1711
1712 case DIOCKILLSTATES: {
1713 struct pf_state *state;
1714 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1715 int killed = 0;
1716
1717 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1718 if ((!psk->psk_af || state->af == psk->psk_af)
1719 && (!psk->psk_proto || psk->psk_proto ==
1720 state->proto) &&
1721 PF_MATCHA(psk->psk_src.neg,
1722 &psk->psk_src.addr.v.a.addr,
1723 &psk->psk_src.addr.v.a.mask,
1724 &state->lan.addr, state->af) &&
1725 PF_MATCHA(psk->psk_dst.neg,
1726 &psk->psk_dst.addr.v.a.addr,
1727 &psk->psk_dst.addr.v.a.mask,
1728 &state->ext.addr, state->af) &&
1729 (psk->psk_src.port_op == 0 ||
1730 pf_match_port(psk->psk_src.port_op,
1731 psk->psk_src.port[0], psk->psk_src.port[1],
1732 state->lan.port)) &&
1733 (psk->psk_dst.port_op == 0 ||
1734 pf_match_port(psk->psk_dst.port_op,
1735 psk->psk_dst.port[0], psk->psk_dst.port[1],
1736 state->ext.port)) &&
1737 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1738 state->u.s.kif->pfik_name))) {
1739 state->timeout = PFTM_PURGE;
1740 killed++;
1741 }
1742 }
1743 pf_purge_expired_states();
1744 psk->psk_af = killed;
1745 break;
1746 }
1747
1748 case DIOCADDSTATE: {
1749 struct pfioc_state *ps = (struct pfioc_state *)addr;
1750 struct pf_state *state;
1751 struct pfi_kif *kif;
1752
1753 if (ps->state.timeout >= PFTM_MAX &&
1754 ps->state.timeout != PFTM_UNTIL_PACKET) {
1755 error = EINVAL;
1756 break;
1757 }
1758 state = pool_get(&pf_state_pl, PR_NOWAIT);
1759 if (state == NULL) {
1760 error = ENOMEM;
1761 break;
1762 }
1763 kif = pfi_lookup_create(ps->state.u.ifname);
1764 if (kif == NULL) {
1765 pool_put(&pf_state_pl, state);
1766 error = ENOENT;
1767 break;
1768 }
1769 bcopy(&ps->state, state, sizeof(struct pf_state));
1770 bzero(&state->u, sizeof(state->u));
1771 state->rule.ptr = &pf_default_rule;
1772 state->nat_rule.ptr = NULL;
1773 state->anchor.ptr = NULL;
1774 state->rt_kif = NULL;
1775 state->creation = time_second;
1776 state->pfsync_time = 0;
1777 state->packets[0] = state->packets[1] = 0;
1778 state->bytes[0] = state->bytes[1] = 0;
1779
1780 if (pf_insert_state(kif, state)) {
1781 pfi_maybe_destroy(kif);
1782 pool_put(&pf_state_pl, state);
1783 error = ENOMEM;
1784 }
1785 break;
1786 }
1787
1788 case DIOCGETSTATE: {
1789 struct pfioc_state *ps = (struct pfioc_state *)addr;
1790 struct pf_state *state;
1791 u_int32_t nr;
1792
1793 nr = 0;
1794 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1795 if (nr >= ps->nr)
1796 break;
1797 nr++;
1798 }
1799 if (state == NULL) {
1800 error = EBUSY;
1801 break;
1802 }
1803 bcopy(state, &ps->state, sizeof(struct pf_state));
1804 ps->state.rule.nr = state->rule.ptr->nr;
1805 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1806 -1 : state->nat_rule.ptr->nr;
1807 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1808 -1 : state->anchor.ptr->nr;
1809 ps->state.expire = pf_state_expires(state);
1810 if (ps->state.expire > time_second)
1811 ps->state.expire -= time_second;
1812 else
1813 ps->state.expire = 0;
1814 break;
1815 }
1816
1817 case DIOCGETSTATES: {
1818 struct pfioc_states *ps = (struct pfioc_states *)addr;
1819 struct pf_state *state;
1820 struct pf_state *p, pstore;
1821 struct pfi_kif *kif;
1822 u_int32_t nr = 0;
1823 int space = ps->ps_len;
1824
1825 if (space == 0) {
1826 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1827 nr += kif->pfik_states;
1828 ps->ps_len = sizeof(struct pf_state) * nr;
1829 break;
1830 }
1831
1832 p = ps->ps_states;
1833 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1834 RB_FOREACH(state, pf_state_tree_ext_gwy,
1835 &kif->pfik_ext_gwy) {
1836 int secs = time_second;
1837
1838 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1839 break;
1840
1841 bcopy(state, &pstore, sizeof(pstore));
1842 strlcpy(pstore.u.ifname, kif->pfik_name,
1843 sizeof(pstore.u.ifname));
1844 pstore.rule.nr = state->rule.ptr->nr;
1845 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1846 NULL) ? -1 : state->nat_rule.ptr->nr;
1847 pstore.anchor.nr = (state->anchor.ptr ==
1848 NULL) ? -1 : state->anchor.ptr->nr;
1849 pstore.creation = secs - pstore.creation;
1850 pstore.expire = pf_state_expires(state);
1851 if (pstore.expire > secs)
1852 pstore.expire -= secs;
1853 else
1854 pstore.expire = 0;
1855 error = copyout(&pstore, p, sizeof(*p));
1856 if (error)
1857 goto fail;
1858 p++;
1859 nr++;
1860 }
1861 ps->ps_len = sizeof(struct pf_state) * nr;
1862 break;
1863 }
1864
1865 case DIOCGETSTATUS: {
1866 struct pf_status *s = (struct pf_status *)addr;
1867 bcopy(&pf_status, s, sizeof(struct pf_status));
1868 pfi_fill_oldstatus(s);
1869 break;
1870 }
1871
1872 case DIOCSETSTATUSIF: {
1873 struct pfioc_if *pi = (struct pfioc_if *)addr;
1874
1875 if (pi->ifname[0] == 0) {
1876 bzero(pf_status.ifname, IFNAMSIZ);
1877 break;
1878 }
1879 if (ifunit(pi->ifname) == NULL) {
1880 error = EINVAL;
1881 break;
1882 }
1883 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1884 break;
1885 }
1886
1887 case DIOCCLRSTATUS: {
1888 bzero(pf_status.counters, sizeof(pf_status.counters));
1889 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1890 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1891 if (*pf_status.ifname)
1892 pfi_clr_istats(pf_status.ifname, NULL,
1893 PFI_FLAG_INSTANCE);
1894 break;
1895 }
1896
1897 case DIOCNATLOOK: {
1898 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1899 struct pf_state *state;
1900 struct pf_state key;
1901 int m = 0, direction = pnl->direction;
1902
1903 key.af = pnl->af;
1904 key.proto = pnl->proto;
1905
1906 if (!pnl->proto ||
1907 PF_AZERO(&pnl->saddr, pnl->af) ||
1908 PF_AZERO(&pnl->daddr, pnl->af) ||
1909 !pnl->dport || !pnl->sport)
1910 error = EINVAL;
1911 else {
1912 /*
1913 * userland gives us source and dest of connection,
1914 * reverse the lookup so we ask for what happens with
1915 * the return traffic, enabling us to find it in the
1916 * state tree.
1917 */
1918 if (direction == PF_IN) {
1919 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1920 key.ext.port = pnl->dport;
1921 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1922 key.gwy.port = pnl->sport;
1923 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1924 } else {
1925 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1926 key.lan.port = pnl->dport;
1927 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1928 key.ext.port = pnl->sport;
1929 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1930 }
1931 if (m > 1)
1932 error = E2BIG; /* more than one state */
1933 else if (state != NULL) {
1934 if (direction == PF_IN) {
1935 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1936 state->af);
1937 pnl->rsport = state->lan.port;
1938 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1939 pnl->af);
1940 pnl->rdport = pnl->dport;
1941 } else {
1942 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1943 state->af);
1944 pnl->rdport = state->gwy.port;
1945 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1946 pnl->af);
1947 pnl->rsport = pnl->sport;
1948 }
1949 } else
1950 error = ENOENT;
1951 }
1952 break;
1953 }
1954
1955 case DIOCSETTIMEOUT: {
1956 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1957 int old;
1958
1959 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1960 pt->seconds < 0) {
1961 error = EINVAL;
1962 goto fail;
1963 }
1964 old = pf_default_rule.timeout[pt->timeout];
1965 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1966 pt->seconds = old;
1967 break;
1968 }
1969
1970 case DIOCGETTIMEOUT: {
1971 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1972
1973 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1974 error = EINVAL;
1975 goto fail;
1976 }
1977 pt->seconds = pf_default_rule.timeout[pt->timeout];
1978 break;
1979 }
1980
1981 case DIOCGETLIMIT: {
1982 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1983
1984 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1985 error = EINVAL;
1986 goto fail;
1987 }
1988 pl->limit = pf_pool_limits[pl->index].limit;
1989 break;
1990 }
1991
1992 case DIOCSETLIMIT: {
1993 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1994 int old_limit;
1995
1996 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1997 pf_pool_limits[pl->index].pp == NULL) {
1998 error = EINVAL;
1999 goto fail;
2000 }
2001 #ifdef __OpenBSD__
2002 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2003 pl->limit, NULL, 0) != 0) {
2004 error = EBUSY;
2005 goto fail;
2006 }
2007 #else
2008 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2009 pl->limit, NULL, 0);
2010 #endif
2011 old_limit = pf_pool_limits[pl->index].limit;
2012 pf_pool_limits[pl->index].limit = pl->limit;
2013 pl->limit = old_limit;
2014 break;
2015 }
2016
2017 case DIOCSETDEBUG: {
2018 u_int32_t *level = (u_int32_t *)addr;
2019
2020 pf_status.debug = *level;
2021 break;
2022 }
2023
2024 case DIOCCLRRULECTRS: {
2025 struct pf_ruleset *ruleset = &pf_main_ruleset;
2026 struct pf_rule *rule;
2027
2028 TAILQ_FOREACH(rule,
2029 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2030 rule->evaluations = rule->packets =
2031 rule->bytes = 0;
2032 break;
2033 }
2034
2035 #ifdef ALTQ
2036 case DIOCSTARTALTQ: {
2037 struct pf_altq *altq;
2038
2039 /* enable all altq interfaces on active list */
2040 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2041 if (altq->qname[0] == 0) {
2042 error = pf_enable_altq(altq);
2043 if (error != 0)
2044 break;
2045 }
2046 }
2047 if (error == 0)
2048 pf_altq_running = 1;
2049 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2050 break;
2051 }
2052
2053 case DIOCSTOPALTQ: {
2054 struct pf_altq *altq;
2055
2056 /* disable all altq interfaces on active list */
2057 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2058 if (altq->qname[0] == 0) {
2059 error = pf_disable_altq(altq);
2060 if (error != 0)
2061 break;
2062 }
2063 }
2064 if (error == 0)
2065 pf_altq_running = 0;
2066 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2067 break;
2068 }
2069
2070 case DIOCADDALTQ: {
2071 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2072 struct pf_altq *altq, *a;
2073
2074 if (pa->ticket != ticket_altqs_inactive) {
2075 error = EBUSY;
2076 break;
2077 }
2078 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2079 if (altq == NULL) {
2080 error = ENOMEM;
2081 break;
2082 }
2083 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2084
2085 /*
2086 * if this is for a queue, find the discipline and
2087 * copy the necessary fields
2088 */
2089 if (altq->qname[0] != 0) {
2090 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2091 error = EBUSY;
2092 pool_put(&pf_altq_pl, altq);
2093 break;
2094 }
2095 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2096 if (strncmp(a->ifname, altq->ifname,
2097 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2098 altq->altq_disc = a->altq_disc;
2099 break;
2100 }
2101 }
2102 }
2103
2104 error = altq_add(altq);
2105 if (error) {
2106 pool_put(&pf_altq_pl, altq);
2107 break;
2108 }
2109
2110 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2111 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2112 break;
2113 }
2114
2115 case DIOCGETALTQS: {
2116 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2117 struct pf_altq *altq;
2118
2119 pa->nr = 0;
2120 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2121 pa->nr++;
2122 pa->ticket = ticket_altqs_active;
2123 break;
2124 }
2125
2126 case DIOCGETALTQ: {
2127 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2128 struct pf_altq *altq;
2129 u_int32_t nr;
2130
2131 if (pa->ticket != ticket_altqs_active) {
2132 error = EBUSY;
2133 break;
2134 }
2135 nr = 0;
2136 altq = TAILQ_FIRST(pf_altqs_active);
2137 while ((altq != NULL) && (nr < pa->nr)) {
2138 altq = TAILQ_NEXT(altq, entries);
2139 nr++;
2140 }
2141 if (altq == NULL) {
2142 error = EBUSY;
2143 break;
2144 }
2145 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2146 break;
2147 }
2148
2149 case DIOCCHANGEALTQ:
2150 /* CHANGEALTQ not supported yet! */
2151 error = ENODEV;
2152 break;
2153
2154 case DIOCGETQSTATS: {
2155 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2156 struct pf_altq *altq;
2157 u_int32_t nr;
2158 int nbytes;
2159
2160 if (pq->ticket != ticket_altqs_active) {
2161 error = EBUSY;
2162 break;
2163 }
2164 nbytes = pq->nbytes;
2165 nr = 0;
2166 altq = TAILQ_FIRST(pf_altqs_active);
2167 while ((altq != NULL) && (nr < pq->nr)) {
2168 altq = TAILQ_NEXT(altq, entries);
2169 nr++;
2170 }
2171 if (altq == NULL) {
2172 error = EBUSY;
2173 break;
2174 }
2175 error = altq_getqstats(altq, pq->buf, &nbytes);
2176 if (error == 0) {
2177 pq->scheduler = altq->scheduler;
2178 pq->nbytes = nbytes;
2179 }
2180 break;
2181 }
2182 #endif /* ALTQ */
2183
2184 case DIOCBEGINADDRS: {
2185 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2186
2187 pf_empty_pool(&pf_pabuf);
2188 pp->ticket = ++ticket_pabuf;
2189 break;
2190 }
2191
2192 case DIOCADDADDR: {
2193 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2194
2195 #ifndef INET
2196 if (pp->af == AF_INET) {
2197 error = EAFNOSUPPORT;
2198 break;
2199 }
2200 #endif /* INET */
2201 #ifndef INET6
2202 if (pp->af == AF_INET6) {
2203 error = EAFNOSUPPORT;
2204 break;
2205 }
2206 #endif /* INET6 */
2207 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2208 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2209 pp->addr.addr.type != PF_ADDR_TABLE) {
2210 error = EINVAL;
2211 break;
2212 }
2213 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2214 if (pa == NULL) {
2215 error = ENOMEM;
2216 break;
2217 }
2218 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2219 if (pa->ifname[0]) {
2220 pa->kif = pfi_attach_rule(pa->ifname);
2221 if (pa->kif == NULL) {
2222 pool_put(&pf_pooladdr_pl, pa);
2223 error = EINVAL;
2224 break;
2225 }
2226 }
2227 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2228 pfi_dynaddr_remove(&pa->addr);
2229 pfi_detach_rule(pa->kif);
2230 pool_put(&pf_pooladdr_pl, pa);
2231 error = EINVAL;
2232 break;
2233 }
2234 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2235 break;
2236 }
2237
2238 case DIOCGETADDRS: {
2239 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2240
2241 pp->nr = 0;
2242 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2243 pp->r_num, 0, 1, 0);
2244 if (pool == NULL) {
2245 error = EBUSY;
2246 break;
2247 }
2248 TAILQ_FOREACH(pa, &pool->list, entries)
2249 pp->nr++;
2250 break;
2251 }
2252
2253 case DIOCGETADDR: {
2254 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2255 u_int32_t nr = 0;
2256
2257 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2258 pp->r_num, 0, 1, 1);
2259 if (pool == NULL) {
2260 error = EBUSY;
2261 break;
2262 }
2263 pa = TAILQ_FIRST(&pool->list);
2264 while ((pa != NULL) && (nr < pp->nr)) {
2265 pa = TAILQ_NEXT(pa, entries);
2266 nr++;
2267 }
2268 if (pa == NULL) {
2269 error = EBUSY;
2270 break;
2271 }
2272 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2273 pfi_dynaddr_copyout(&pp->addr.addr);
2274 pf_tbladdr_copyout(&pp->addr.addr);
2275 pf_rtlabel_copyout(&pp->addr.addr);
2276 break;
2277 }
2278
2279 case DIOCCHANGEADDR: {
2280 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2281 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2282 struct pf_ruleset *ruleset;
2283
2284 if (pca->action < PF_CHANGE_ADD_HEAD ||
2285 pca->action > PF_CHANGE_REMOVE) {
2286 error = EINVAL;
2287 break;
2288 }
2289 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2290 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2291 pca->addr.addr.type != PF_ADDR_TABLE) {
2292 error = EINVAL;
2293 break;
2294 }
2295
2296 ruleset = pf_find_ruleset(pca->anchor);
2297 if (ruleset == NULL) {
2298 error = EBUSY;
2299 break;
2300 }
2301 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2302 pca->r_num, pca->r_last, 1, 1);
2303 if (pool == NULL) {
2304 error = EBUSY;
2305 break;
2306 }
2307 if (pca->action != PF_CHANGE_REMOVE) {
2308 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2309 if (newpa == NULL) {
2310 error = ENOMEM;
2311 break;
2312 }
2313 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2314 #ifndef INET
2315 if (pca->af == AF_INET) {
2316 pool_put(&pf_pooladdr_pl, newpa);
2317 error = EAFNOSUPPORT;
2318 break;
2319 }
2320 #endif /* INET */
2321 #ifndef INET6
2322 if (pca->af == AF_INET6) {
2323 pool_put(&pf_pooladdr_pl, newpa);
2324 error = EAFNOSUPPORT;
2325 break;
2326 }
2327 #endif /* INET6 */
2328 if (newpa->ifname[0]) {
2329 newpa->kif = pfi_attach_rule(newpa->ifname);
2330 if (newpa->kif == NULL) {
2331 pool_put(&pf_pooladdr_pl, newpa);
2332 error = EINVAL;
2333 break;
2334 }
2335 } else
2336 newpa->kif = NULL;
2337 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2338 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2339 pfi_dynaddr_remove(&newpa->addr);
2340 pfi_detach_rule(newpa->kif);
2341 pool_put(&pf_pooladdr_pl, newpa);
2342 error = EINVAL;
2343 break;
2344 }
2345 }
2346
2347 if (pca->action == PF_CHANGE_ADD_HEAD)
2348 oldpa = TAILQ_FIRST(&pool->list);
2349 else if (pca->action == PF_CHANGE_ADD_TAIL)
2350 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2351 else {
2352 int i = 0;
2353
2354 oldpa = TAILQ_FIRST(&pool->list);
2355 while ((oldpa != NULL) && (i < pca->nr)) {
2356 oldpa = TAILQ_NEXT(oldpa, entries);
2357 i++;
2358 }
2359 if (oldpa == NULL) {
2360 error = EINVAL;
2361 break;
2362 }
2363 }
2364
2365 if (pca->action == PF_CHANGE_REMOVE) {
2366 TAILQ_REMOVE(&pool->list, oldpa, entries);
2367 pfi_dynaddr_remove(&oldpa->addr);
2368 pf_tbladdr_remove(&oldpa->addr);
2369 pfi_detach_rule(oldpa->kif);
2370 pool_put(&pf_pooladdr_pl, oldpa);
2371 } else {
2372 if (oldpa == NULL)
2373 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2374 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2375 pca->action == PF_CHANGE_ADD_BEFORE)
2376 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2377 else
2378 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2379 newpa, entries);
2380 }
2381
2382 pool->cur = TAILQ_FIRST(&pool->list);
2383 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2384 pca->af);
2385 break;
2386 }
2387
2388 case DIOCGETRULESETS: {
2389 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2390 struct pf_ruleset *ruleset;
2391 struct pf_anchor *anchor;
2392
2393 pr->path[sizeof(pr->path) - 1] = 0;
2394 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2395 error = EINVAL;
2396 break;
2397 }
2398 pr->nr = 0;
2399 if (ruleset->anchor == NULL) {
2400 /* XXX kludge for pf_main_ruleset */
2401 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2402 if (anchor->parent == NULL)
2403 pr->nr++;
2404 } else {
2405 RB_FOREACH(anchor, pf_anchor_node,
2406 &ruleset->anchor->children)
2407 pr->nr++;
2408 }
2409 break;
2410 }
2411
2412 case DIOCGETRULESET: {
2413 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2414 struct pf_ruleset *ruleset;
2415 struct pf_anchor *anchor;
2416 u_int32_t nr = 0;
2417
2418 pr->path[sizeof(pr->path) - 1] = 0;
2419 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2420 error = EINVAL;
2421 break;
2422 }
2423 pr->name[0] = 0;
2424 if (ruleset->anchor == NULL) {
2425 /* XXX kludge for pf_main_ruleset */
2426 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2427 if (anchor->parent == NULL && nr++ == pr->nr) {
2428 strlcpy(pr->name, anchor->name,
2429 sizeof(pr->name));
2430 break;
2431 }
2432 } else {
2433 RB_FOREACH(anchor, pf_anchor_node,
2434 &ruleset->anchor->children)
2435 if (nr++ == pr->nr) {
2436 strlcpy(pr->name, anchor->name,
2437 sizeof(pr->name));
2438 break;
2439 }
2440 }
2441 if (!pr->name[0])
2442 error = EBUSY;
2443 break;
2444 }
2445
2446 case DIOCRCLRTABLES: {
2447 struct pfioc_table *io = (struct pfioc_table *)addr;
2448
2449 if (io->pfrio_esize != 0) {
2450 error = ENODEV;
2451 break;
2452 }
2453 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2454 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2455 break;
2456 }
2457
2458 case DIOCRADDTABLES: {
2459 struct pfioc_table *io = (struct pfioc_table *)addr;
2460
2461 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2462 error = ENODEV;
2463 break;
2464 }
2465 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2466 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2467 break;
2468 }
2469
2470 case DIOCRDELTABLES: {
2471 struct pfioc_table *io = (struct pfioc_table *)addr;
2472
2473 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2474 error = ENODEV;
2475 break;
2476 }
2477 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2478 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2479 break;
2480 }
2481
2482 case DIOCRGETTABLES: {
2483 struct pfioc_table *io = (struct pfioc_table *)addr;
2484
2485 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2486 error = ENODEV;
2487 break;
2488 }
2489 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2490 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2491 break;
2492 }
2493
2494 case DIOCRGETTSTATS: {
2495 struct pfioc_table *io = (struct pfioc_table *)addr;
2496
2497 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2498 error = ENODEV;
2499 break;
2500 }
2501 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2502 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2503 break;
2504 }
2505
2506 case DIOCRCLRTSTATS: {
2507 struct pfioc_table *io = (struct pfioc_table *)addr;
2508
2509 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2510 error = ENODEV;
2511 break;
2512 }
2513 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2514 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2515 break;
2516 }
2517
2518 case DIOCRSETTFLAGS: {
2519 struct pfioc_table *io = (struct pfioc_table *)addr;
2520
2521 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2522 error = ENODEV;
2523 break;
2524 }
2525 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2526 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2527 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2528 break;
2529 }
2530
2531 case DIOCRCLRADDRS: {
2532 struct pfioc_table *io = (struct pfioc_table *)addr;
2533
2534 if (io->pfrio_esize != 0) {
2535 error = ENODEV;
2536 break;
2537 }
2538 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2539 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2540 break;
2541 }
2542
2543 case DIOCRADDADDRS: {
2544 struct pfioc_table *io = (struct pfioc_table *)addr;
2545
2546 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2547 error = ENODEV;
2548 break;
2549 }
2550 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2551 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2552 PFR_FLAG_USERIOCTL);
2553 break;
2554 }
2555
2556 case DIOCRDELADDRS: {
2557 struct pfioc_table *io = (struct pfioc_table *)addr;
2558
2559 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2560 error = ENODEV;
2561 break;
2562 }
2563 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2564 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2565 PFR_FLAG_USERIOCTL);
2566 break;
2567 }
2568
2569 case DIOCRSETADDRS: {
2570 struct pfioc_table *io = (struct pfioc_table *)addr;
2571
2572 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2573 error = ENODEV;
2574 break;
2575 }
2576 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2577 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2578 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2579 PFR_FLAG_USERIOCTL);
2580 break;
2581 }
2582
2583 case DIOCRGETADDRS: {
2584 struct pfioc_table *io = (struct pfioc_table *)addr;
2585
2586 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2587 error = ENODEV;
2588 break;
2589 }
2590 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2591 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2592 break;
2593 }
2594
2595 case DIOCRGETASTATS: {
2596 struct pfioc_table *io = (struct pfioc_table *)addr;
2597
2598 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2599 error = ENODEV;
2600 break;
2601 }
2602 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2603 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2604 break;
2605 }
2606
2607 case DIOCRCLRASTATS: {
2608 struct pfioc_table *io = (struct pfioc_table *)addr;
2609
2610 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2611 error = ENODEV;
2612 break;
2613 }
2614 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2615 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2616 PFR_FLAG_USERIOCTL);
2617 break;
2618 }
2619
2620 case DIOCRTSTADDRS: {
2621 struct pfioc_table *io = (struct pfioc_table *)addr;
2622
2623 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2624 error = ENODEV;
2625 break;
2626 }
2627 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2628 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2629 PFR_FLAG_USERIOCTL);
2630 break;
2631 }
2632
2633 case DIOCRINADEFINE: {
2634 struct pfioc_table *io = (struct pfioc_table *)addr;
2635
2636 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2637 error = ENODEV;
2638 break;
2639 }
2640 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2641 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2642 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2643 break;
2644 }
2645
2646 case DIOCOSFPADD: {
2647 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2648 error = pf_osfp_add(io);
2649 break;
2650 }
2651
2652 case DIOCOSFPGET: {
2653 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2654 error = pf_osfp_get(io);
2655 break;
2656 }
2657
2658 case DIOCXBEGIN: {
2659 struct pfioc_trans *io = (struct pfioc_trans *)
2660 addr;
2661 static struct pfioc_trans_e ioe;
2662 static struct pfr_table table;
2663 int i;
2664
2665 if (io->esize != sizeof(ioe)) {
2666 error = ENODEV;
2667 goto fail;
2668 }
2669 for (i = 0; i < io->size; i++) {
2670 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2671 error = EFAULT;
2672 goto fail;
2673 }
2674 switch (ioe.rs_num) {
2675 #ifdef ALTQ
2676 case PF_RULESET_ALTQ:
2677 if (ioe.anchor[0]) {
2678 error = EINVAL;
2679 goto fail;
2680 }
2681 if ((error = pf_begin_altq(&ioe.ticket)))
2682 goto fail;
2683 break;
2684 #endif /* ALTQ */
2685 case PF_RULESET_TABLE:
2686 bzero(&table, sizeof(table));
2687 strlcpy(table.pfrt_anchor, ioe.anchor,
2688 sizeof(table.pfrt_anchor));
2689 if ((error = pfr_ina_begin(&table,
2690 &ioe.ticket, NULL, 0)))
2691 goto fail;
2692 break;
2693 default:
2694 if ((error = pf_begin_rules(&ioe.ticket,
2695 ioe.rs_num, ioe.anchor)))
2696 goto fail;
2697 break;
2698 }
2699 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2700 error = EFAULT;
2701 goto fail;
2702 }
2703 }
2704 break;
2705 }
2706
2707 case DIOCXROLLBACK: {
2708 struct pfioc_trans *io = (struct pfioc_trans *)
2709 addr;
2710 static struct pfioc_trans_e ioe;
2711 static struct pfr_table table;
2712 int i;
2713
2714 if (io->esize != sizeof(ioe)) {
2715 error = ENODEV;
2716 goto fail;
2717 }
2718 for (i = 0; i < io->size; i++) {
2719 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2720 error = EFAULT;
2721 goto fail;
2722 }
2723 switch (ioe.rs_num) {
2724 #ifdef ALTQ
2725 case PF_RULESET_ALTQ:
2726 if (ioe.anchor[0]) {
2727 error = EINVAL;
2728 goto fail;
2729 }
2730 if ((error = pf_rollback_altq(ioe.ticket)))
2731 goto fail; /* really bad */
2732 break;
2733 #endif /* ALTQ */
2734 case PF_RULESET_TABLE:
2735 bzero(&table, sizeof(table));
2736 strlcpy(table.pfrt_anchor, ioe.anchor,
2737 sizeof(table.pfrt_anchor));
2738 if ((error = pfr_ina_rollback(&table,
2739 ioe.ticket, NULL, 0)))
2740 goto fail; /* really bad */
2741 break;
2742 default:
2743 if ((error = pf_rollback_rules(ioe.ticket,
2744 ioe.rs_num, ioe.anchor)))
2745 goto fail; /* really bad */
2746 break;
2747 }
2748 }
2749 break;
2750 }
2751
2752 case DIOCXCOMMIT: {
2753 struct pfioc_trans *io = (struct pfioc_trans *)
2754 addr;
2755 static struct pfioc_trans_e ioe;
2756 static struct pfr_table table;
2757 struct pf_ruleset *rs;
2758 int i;
2759
2760 if (io->esize != sizeof(ioe)) {
2761 error = ENODEV;
2762 goto fail;
2763 }
2764 /* first makes sure everything will succeed */
2765 for (i = 0; i < io->size; i++) {
2766 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2767 error = EFAULT;
2768 goto fail;
2769 }
2770 switch (ioe.rs_num) {
2771 #ifdef ALTQ
2772 case PF_RULESET_ALTQ:
2773 if (ioe.anchor[0]) {
2774 error = EINVAL;
2775 goto fail;
2776 }
2777 if (!altqs_inactive_open || ioe.ticket !=
2778 ticket_altqs_inactive) {
2779 error = EBUSY;
2780 goto fail;
2781 }
2782 break;
2783 #endif /* ALTQ */
2784 case PF_RULESET_TABLE:
2785 rs = pf_find_ruleset(ioe.anchor);
2786 if (rs == NULL || !rs->topen || ioe.ticket !=
2787 rs->tticket) {
2788 error = EBUSY;
2789 goto fail;
2790 }
2791 break;
2792 default:
2793 if (ioe.rs_num < 0 || ioe.rs_num >=
2794 PF_RULESET_MAX) {
2795 error = EINVAL;
2796 goto fail;
2797 }
2798 rs = pf_find_ruleset(ioe.anchor);
2799 if (rs == NULL ||
2800 !rs->rules[ioe.rs_num].inactive.open ||
2801 rs->rules[ioe.rs_num].inactive.ticket !=
2802 ioe.ticket) {
2803 error = EBUSY;
2804 goto fail;
2805 }
2806 break;
2807 }
2808 }
2809 /* now do the commit - no errors should happen here */
2810 for (i = 0; i < io->size; i++) {
2811 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2812 error = EFAULT;
2813 goto fail;
2814 }
2815 switch (ioe.rs_num) {
2816 #ifdef ALTQ
2817 case PF_RULESET_ALTQ:
2818 if ((error = pf_commit_altq(ioe.ticket)))
2819 goto fail; /* really bad */
2820 break;
2821 #endif /* ALTQ */
2822 case PF_RULESET_TABLE:
2823 bzero(&table, sizeof(table));
2824 strlcpy(table.pfrt_anchor, ioe.anchor,
2825 sizeof(table.pfrt_anchor));
2826 if ((error = pfr_ina_commit(&table, ioe.ticket,
2827 NULL, NULL, 0)))
2828 goto fail; /* really bad */
2829 break;
2830 default:
2831 if ((error = pf_commit_rules(ioe.ticket,
2832 ioe.rs_num, ioe.anchor)))
2833 goto fail; /* really bad */
2834 break;
2835 }
2836 }
2837 break;
2838 }
2839
2840 case DIOCGETSRCNODES: {
2841 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2842 struct pf_src_node *n;
2843 struct pf_src_node *p, pstore;
2844 u_int32_t nr = 0;
2845 int space = psn->psn_len;
2846
2847 if (space == 0) {
2848 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2849 nr++;
2850 psn->psn_len = sizeof(struct pf_src_node) * nr;
2851 break;
2852 }
2853
2854 p = psn->psn_src_nodes;
2855 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2856 int secs = time_second, diff;
2857
2858 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2859 break;
2860
2861 bcopy(n, &pstore, sizeof(pstore));
2862 if (n->rule.ptr != NULL)
2863 pstore.rule.nr = n->rule.ptr->nr;
2864 pstore.creation = secs - pstore.creation;
2865 if (pstore.expire > secs)
2866 pstore.expire -= secs;
2867 else
2868 pstore.expire = 0;
2869
2870 /* adjust the connection rate estimate */
2871 diff = secs - n->conn_rate.last;
2872 if (diff >= n->conn_rate.seconds)
2873 pstore.conn_rate.count = 0;
2874 else
2875 pstore.conn_rate.count -=
2876 n->conn_rate.count * diff /
2877 n->conn_rate.seconds;
2878
2879 error = copyout(&pstore, p, sizeof(*p));
2880 if (error)
2881 goto fail;
2882 p++;
2883 nr++;
2884 }
2885 psn->psn_len = sizeof(struct pf_src_node) * nr;
2886 break;
2887 }
2888
2889 case DIOCCLRSRCNODES: {
2890 struct pf_src_node *n;
2891 struct pf_state *state;
2892
2893 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2894 state->src_node = NULL;
2895 state->nat_src_node = NULL;
2896 }
2897 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2898 n->expire = 1;
2899 n->states = 0;
2900 }
2901 pf_purge_expired_src_nodes();
2902 pf_status.src_nodes = 0;
2903 break;
2904 }
2905
2906 case DIOCSETHOSTID: {
2907 u_int32_t *hostid = (u_int32_t *)addr;
2908
2909 if (*hostid == 0)
2910 pf_status.hostid = arc4random();
2911 else
2912 pf_status.hostid = *hostid;
2913 break;
2914 }
2915
2916 case DIOCOSFPFLUSH:
2917 pf_osfp_flush();
2918 break;
2919
2920 case DIOCIGETIFACES: {
2921 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2922
2923 if (io->pfiio_esize != sizeof(struct pfi_if)) {
2924 error = ENODEV;
2925 break;
2926 }
2927 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2928 &io->pfiio_size, io->pfiio_flags);
2929 break;
2930 }
2931
2932 case DIOCICLRISTATS: {
2933 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2934
2935 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2936 io->pfiio_flags);
2937 break;
2938 }
2939
2940 case DIOCSETIFFLAG: {
2941 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2942
2943 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2944 break;
2945 }
2946
2947 case DIOCCLRIFFLAG: {
2948 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2949
2950 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2951 break;
2952 }
2953
2954 default:
2955 error = ENODEV;
2956 break;
2957 }
2958 fail:
2959 splx(s);
2960 return (error);
2961 }
2962
2963 #ifdef __NetBSD__
2964 #ifdef INET
2965 int
2966 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2967 {
2968 int error;
2969
2970 /*
2971 * ensure that mbufs are writable beforehand
2972 * as it's assumed by pf code.
2973 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2974 * XXX inefficient
2975 */
2976 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2977 if (error) {
2978 m_freem(*mp);
2979 *mp = NULL;
2980 return error;
2981 }
2982
2983 /*
2984 * If the packet is out-bound, we can't delay checksums
2985 * here. For in-bound, the checksum has already been
2986 * validated.
2987 */
2988 if (dir == PFIL_OUT) {
2989 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2990 in_delayed_cksum(*mp);
2991 (*mp)->m_pkthdr.csum_flags &=
2992 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2993 }
2994 }
2995
2996 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2997 != PF_PASS) {
2998 m_freem(*mp);
2999 *mp = NULL;
3000 return EHOSTUNREACH;
3001 }
3002
3003 /*
3004 * we're not compatible with fast-forward.
3005 */
3006
3007 if (dir == PFIL_IN && *mp) {
3008 (*mp)->m_flags &= ~M_CANFASTFWD;
3009 }
3010
3011 return (0);
3012 }
3013 #endif /* INET */
3014
3015 #ifdef INET6
3016 int
3017 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3018 {
3019 int error;
3020
3021 /*
3022 * ensure that mbufs are writable beforehand
3023 * as it's assumed by pf code.
3024 * XXX inefficient
3025 */
3026 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3027 if (error) {
3028 m_freem(*mp);
3029 *mp = NULL;
3030 return error;
3031 }
3032
3033 /*
3034 * If the packet is out-bound, we can't delay checksums
3035 * here. For in-bound, the checksum has already been
3036 * validated.
3037 */
3038 if (dir == PFIL_OUT) {
3039 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3040 in6_delayed_cksum(*mp);
3041 (*mp)->m_pkthdr.csum_flags &=
3042 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3043 }
3044 }
3045
3046 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3047 != PF_PASS) {
3048 m_freem(*mp);
3049 *mp = NULL;
3050 return EHOSTUNREACH;
3051 } else
3052 return (0);
3053 }
3054 #endif
3055
3056 int
3057 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3058 {
3059 u_long cmd = (u_long)mp;
3060
3061 switch (cmd) {
3062 case PFIL_IFNET_ATTACH:
3063 pfi_attach_ifnet(ifp);
3064 break;
3065 case PFIL_IFNET_DETACH:
3066 pfi_detach_ifnet(ifp);
3067 break;
3068 }
3069
3070 return (0);
3071 }
3072
3073 int
3074 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3075 {
3076 extern void pfi_kifaddr_update_if(struct ifnet *);
3077
3078 u_long cmd = (u_long)mp;
3079
3080 switch (cmd) {
3081 case SIOCSIFADDR:
3082 case SIOCAIFADDR:
3083 case SIOCDIFADDR:
3084 #ifdef INET6
3085 case SIOCAIFADDR_IN6:
3086 case SIOCDIFADDR_IN6:
3087 #endif
3088 pfi_kifaddr_update_if(ifp);
3089 break;
3090 default:
3091 panic("unexpected ioctl");
3092 }
3093
3094 return (0);
3095 }
3096
/*
 * Hook pf into the pfil(9) packet filter framework.
 *
 * Registers the interface attach/detach and address-change wrappers,
 * then the IPv4 (and, when compiled with INET6, the IPv6) packet
 * hooks, and finally attaches every interface that already exists so
 * pf knows about them.  Idempotent: a second call while attached
 * returns 0 without doing anything.  On failure the hooks registered
 * so far are removed in reverse order and the error from
 * pfil_add_hook() (or ENOENT if a pfil head is missing) is returned.
 */
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	/* Already installed: nothing to do. */
	if (pf_pfil_attached)
		return (0);

	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	/* IPv4 packet hook; ENOENT if the AF_INET pfil head is absent. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	/* IPv6 packet hook, same pattern as IPv4. */
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	/* Tell pf about interfaces that existed before the attach. */
	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

	/* Error unwind: remove hooks in reverse registration order. */
#ifdef INET6
bad4:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}
3155
/*
 * Undo pf_pfil_attach(): detach the interfaces pf is tracking and
 * remove all pfil(9) hooks.  Idempotent: returns 0 immediately when
 * pf is not currently attached.
 */
static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	if (pf_pfil_attached == 0)
		return (0);

	/*
	 * NOTE(review): the loop tests pfi_index2kif[i] but passes
	 * ifindex2ifnet[i] to pfi_detach_ifnet(); this assumes the two
	 * tables stay index-synchronized -- confirm against pf_if.c.
	 */
	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	/* Packet hooks live on the per-address-family pfil heads. */
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
3189 #endif
3190